cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vgic-mmio-v2.c (15048B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv2 MMIO handling functions
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>

#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 1: Report GICv2 interrupts as group 0 instead of group 1
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */

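/*
 * Guest reads of the "misc" range: GICD_CTLR, GICD_TYPER (GIC_DIST_CTR)
 * and GICD_IIDR. Anything else in the region reads as zero.
 */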
static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vgic->enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
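		/*
		 * GICD_TYPER: bits [4:0] (ITLinesNumber) encode the number
		 * of supported interrupts as 32 * (N + 1), bits [7:5]
		 * (CPUNumber) the number of implemented CPU interfaces
		 * minus one.
		 */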
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}

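/*
 * Guest writes to the "misc" range: only GICD_CTLR is writable. When the
 * distributor flips from disabled to enabled, all vCPUs are kicked so
 * that newly deliverable interrupts are forwarded right away.
 */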
static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}

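/*
 * Userspace writes to the "misc" range. A GICD_IIDR write may only change
 * the Revision field; accepting it both records the revision and opts the
 * VM in to user-writable interrupt groups (see the comment below).
 */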
static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u32 reg;

	switch (addr & 0x0c) {
	case GIC_DIST_IIDR:
		reg = vgic_mmio_read_v2_misc(vcpu, addr, len);
		if ((reg ^ val) & ~GICD_IIDR_REVISION_MASK)
			return -EINVAL;

		/*
		 * If we observe a write to GICD_IIDR we know that userspace
		 * has been updated and has had a chance to cope with older
		 * kernels (VGICv2 IIDR.Revision == 0) incorrectly reporting
		 * interrupts as group 1, and therefore we now allow groups to
		 * be user writable.  Doing this by default would break
		 * migration from old kernels to new kernels with legacy
		 * userspace.
		 */
		reg = FIELD_GET(GICD_IIDR_REVISION_MASK, reg);
		switch (reg) {
		case KVM_VGIC_IMP_REV_2:
		case KVM_VGIC_IMP_REV_3:
			vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
			dist->implementation_rev = reg;
			return 0;
		default:
			return -EINVAL;
		}
	}

	vgic_mmio_write_v2_misc(vcpu, addr, len, val);
	return 0;
}

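/*
 * Group writes from userspace are silently ignored unless userspace has
 * opted in through a GICD_IIDR write (see vgic_mmio_uaccess_write_v2_misc).
 */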
static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
		vgic_mmio_write_group(vcpu, addr, len, val);

	return 0;
}

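/*
 * Emulates a guest write to GICD_SGIR: the SGI number sits in bits [3:0],
 * the CPU target list in bits [23:16] and the target list filter in bits
 * [25:24]. The SGI is made pending on each targeted vCPU, recording the
 * source CPU so GICD_{S,C}PENDSGIR can report it per source.
 */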
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	struct kvm_vcpu *vcpu;
	unsigned long flags, c;

	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}

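/* GICD_ITARGETSR reads: one byte per interrupt, a bitmap of target CPUs. */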
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

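/*
 * GICD_ITARGETSR writes: the byte is masked down to online vCPUs and the
 * lowest set bit picked as the routing target, since the vgic delivers
 * each SPI to exactly one vCPU.
 */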
static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
	int i;
	unsigned long flags;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->targets = (val >> (i * 8)) & cpu_mask;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

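/*
 * GICD_{S,C}PENDSGIR reads: one byte per SGI, each bit flagging a source
 * CPU with that SGI pending.
 */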
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}
	return val;
}

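/*
 * GICD_CPENDSGIR writes: clear the given source bits; the SGI is no
 * longer pending once all sources are cleared.
 */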
static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

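/*
 * GICD_SPENDSGIR writes: set the given source bits and, if any source is
 * now set, latch the SGI as pending and queue it.
 */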
static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
		vgic_put_irq(vcpu->kvm, irq);
	}
}

#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only, there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;

		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * unsigned long.
		 */
		val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
			GICV_PMR_PRIORITY_SHIFT;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}

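/*
 * Userspace writes of the CPU interface state; GIC_CPU_IDENT has no case
 * here and is thus effectively read-only.
 */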
static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);

		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
		 * PMR field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * unsigned long.
		 */
		vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
			GICV_PMR_PRIORITY_MASK;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}

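/*
 * GICC_APRn accesses from userspace. A GICv2 host has a single GICH_APR,
 * so only APR0 is backed; on a GICv3 host the v2 guest's active
 * priorities live in ICH_AP1Rn instead.
 */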
static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return 0;
		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return 0;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		return vgicv3->vgic_ap1r[n];
	}
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return;
		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		vgicv3->vgic_ap1r[n] = val;
	}
}

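/*
 * The distributor register map: each entry wires up the guest MMIO
 * handlers and, where the semantics differ, dedicated userspace accessors.
 */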
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc,
		NULL, vgic_mmio_uaccess_write_v2_misc,
		12, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_group, vgic_mmio_write_group,
		NULL, vgic_mmio_uaccess_write_v2_group, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_uaccess_read_pending, vgic_uaccess_write_spending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_uaccess_read_pending, vgic_uaccess_write_cpending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

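/* The CPU interface register map, used for userspace access only. */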
static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};

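/*
 * Wire up the distributor as a KVM I/O device; the returned SZ_4K is the
 * size of the GICv2 distributor region.
 */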
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}

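/*
 * Check whether a KVM_DEV_ARM_VGIC_GRP_{DIST,CPU}_REGS attribute offset
 * maps to an emulated register, without performing the access itself.
 */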
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v2_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		iodev.regions = vgic_v2_cpu_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		iodev.base_addr = 0;
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

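/* Userspace read/write of a single 32-bit CPU interface register. */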
int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
		.iodev_type = IODEV_CPUIF,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

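/* Userspace read/write of a single 32-bit distributor register. */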
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
		.iodev_type = IODEV_DIST,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}