cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vgic_irq.c (23060B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * vgic_irq.c - Test userspace injection of IRQs
      4 *
      5 * This test validates the injection of IRQs from userspace using various
      6 * methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
      7 * host to inject a specific intid via a GUEST_SYNC call, and then checks that
      8 * it received it.
      9 */
     10
     11#include <asm/kvm.h>
     12#include <asm/kvm_para.h>
     13#include <sys/eventfd.h>
     14#include <linux/sizes.h>
     15
     16#include "processor.h"
     17#include "test_util.h"
     18#include "kvm_util.h"
     19#include "gic.h"
     20#include "gic_v3.h"
     21#include "vgic.h"
     22
     23#define GICD_BASE_GPA		0x08000000ULL
     24#define GICR_BASE_GPA		0x080A0000ULL
     25#define VCPU_ID			0
     26
     27/*
      28 * Stores the user-specified args; it's passed to the guest and to every test
     29 * function.
     30 */
     31struct test_args {
     32	uint32_t nr_irqs; /* number of KVM supported IRQs. */
     33	bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
     34	bool level_sensitive; /* 1 is level, 0 is edge */
     35	int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
     36	bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
     37};
     38
     39/*
     40 * KVM implements 32 priority levels:
     41 * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
     42 *
     43 * Note that these macros will still be correct in the case that KVM implements
     44 * more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
     45 */
     46#define KVM_NUM_PRIOS		32
     47#define KVM_PRIO_SHIFT		3 /* steps of 8 = 1 << 3 */
     48#define KVM_PRIO_STEPS		(1 << KVM_PRIO_SHIFT) /* 8 */
     49#define LOWEST_PRIO		(KVM_NUM_PRIOS - 1)
     50#define CPU_PRIO_MASK		(LOWEST_PRIO << KVM_PRIO_SHIFT)	/* 0xf8 */
     51#define IRQ_DEFAULT_PRIO	(LOWEST_PRIO - 1)
     52#define IRQ_DEFAULT_PRIO_REG	(IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */
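        /*
         * The default priority (0xf0) is one step above the lowest (0xf8) so
         * that IRQs left at the default are still delivered when the CPU
         * priority mask is set to CPU_PRIO_MASK.
         */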
     53
     54static void *dist = (void *)GICD_BASE_GPA;
     55static void *redist = (void *)GICR_BASE_GPA;
     56
     57/*
     58 * The kvm_inject_* utilities are used by the guest to ask the host to inject
     59 * interrupts (e.g., using the KVM_IRQ_LINE ioctl).
     60 */
     61
     62typedef enum {
     63	KVM_INJECT_EDGE_IRQ_LINE = 1,
     64	KVM_SET_IRQ_LINE,
     65	KVM_SET_IRQ_LINE_HIGH,
     66	KVM_SET_LEVEL_INFO_HIGH,
     67	KVM_INJECT_IRQFD,
     68	KVM_WRITE_ISPENDR,
     69	KVM_WRITE_ISACTIVER,
     70} kvm_inject_cmd;
     71
     72struct kvm_inject_args {
     73	kvm_inject_cmd cmd;
     74	uint32_t first_intid;
     75	uint32_t num;
     76	int level;
     77	bool expect_failure;
     78};
     79
     80/* Used on the guest side to perform the hypercall. */
     81static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
     82		uint32_t num, int level, bool expect_failure);
     83
     84/* Used on the host side to get the hypercall info. */
     85static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
     86		struct kvm_inject_args *args);
     87
     88#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure)			\
     89	kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)
     90
     91#define KVM_INJECT_MULTI(cmd, intid, num)					\
     92	_KVM_INJECT_MULTI(cmd, intid, num, false)
     93
     94#define _KVM_INJECT(cmd, intid, expect_failure)					\
     95	_KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)
     96
     97#define KVM_INJECT(cmd, intid)							\
     98	_KVM_INJECT_MULTI(cmd, intid, 1, false)
     99
    100#define KVM_ACTIVATE(cmd, intid)						\
    101	kvm_inject_call(cmd, intid, 1, 1, false);
    102
    103struct kvm_inject_desc {
    104	kvm_inject_cmd cmd;
     105	/* can inject SGIs, PPIs, and/or SPIs. */
    106	bool sgi, ppi, spi;
    107};
    108
    109static struct kvm_inject_desc inject_edge_fns[] = {
    110	/*                                      sgi    ppi    spi */
    111	{ KVM_INJECT_EDGE_IRQ_LINE,		false, false, true },
    112	{ KVM_INJECT_IRQFD,			false, false, true },
    113	{ KVM_WRITE_ISPENDR,			true,  false, true },
    114	{ 0, },
    115};
    116
    117static struct kvm_inject_desc inject_level_fns[] = {
    118	/*                                      sgi    ppi    spi */
    119	{ KVM_SET_IRQ_LINE_HIGH,		false, true,  true },
    120	{ KVM_SET_LEVEL_INFO_HIGH,		false, true,  true },
    121	{ KVM_INJECT_IRQFD,			false, false, true },
    122	{ KVM_WRITE_ISPENDR,			false, true,  true },
    123	{ 0, },
    124};
    125
    126static struct kvm_inject_desc set_active_fns[] = {
    127	/*                                      sgi    ppi    spi */
    128	{ KVM_WRITE_ISACTIVER,			true,  true,  true },
    129	{ 0, },
    130};
    131
    132#define for_each_inject_fn(t, f)						\
    133	for ((f) = (t); (f)->cmd; (f)++)
    134
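        /*
         * Iterate over the injection methods, skipping KVM_INJECT_IRQFD when
         * the host lacks KVM_CAP_IRQFD support.
         */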
    135#define for_each_supported_inject_fn(args, t, f)				\
    136	for_each_inject_fn(t, f)						\
    137		if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)
    138
    139#define for_each_supported_activate_fn(args, t, f)				\
    140	for_each_supported_inject_fn((args), (t), (f))
    141
    142/* Shared between the guest main thread and the IRQ handlers. */
    143volatile uint64_t irq_handled;
    144volatile uint32_t irqnr_received[MAX_SPI + 1];
    145
    146static void reset_stats(void)
    147{
    148	int i;
    149
    150	irq_handled = 0;
    151	for (i = 0; i <= MAX_SPI; i++)
    152		irqnr_received[i] = 0;
    153}
    154
    155static uint64_t gic_read_ap1r0(void)
    156{
    157	uint64_t reg = read_sysreg_s(SYS_ICV_AP1R0_EL1);
    158
    159	dsb(sy);
    160	return reg;
    161}
    162
    163static void gic_write_ap1r0(uint64_t val)
    164{
    165	write_sysreg_s(val, SYS_ICV_AP1R0_EL1);
    166	isb();
    167}
    168
    169static void guest_set_irq_line(uint32_t intid, uint32_t level);
    170
    171static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
    172{
    173	uint32_t intid = gic_get_and_ack_irq();
    174
    175	if (intid == IAR_SPURIOUS)
    176		return;
    177
    178	GUEST_ASSERT(gic_irq_get_active(intid));
    179
    180	if (!level_sensitive)
    181		GUEST_ASSERT(!gic_irq_get_pending(intid));
    182
    183	if (level_sensitive)
    184		guest_set_irq_line(intid, 0);
    185
    186	GUEST_ASSERT(intid < MAX_SPI);
    187	irqnr_received[intid] += 1;
    188	irq_handled += 1;
    189
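        	/*
        	 * With the EOI split enabled (EOImode=1), the EOI write below only
        	 * drops the running priority; the DIR write performs the deactivation.
        	 */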
    190	gic_set_eoi(intid);
    191	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
    192	if (eoi_split)
    193		gic_set_dir(intid);
    194
    195	GUEST_ASSERT(!gic_irq_get_active(intid));
    196	GUEST_ASSERT(!gic_irq_get_pending(intid));
    197}
    198
    199static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
    200		uint32_t num, int level, bool expect_failure)
    201{
    202	struct kvm_inject_args args = {
    203		.cmd = cmd,
    204		.first_intid = first_intid,
    205		.num = num,
    206		.level = level,
    207		.expect_failure = expect_failure,
    208	};
    209	GUEST_SYNC(&args);
    210}
    211
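        /* Assert that acknowledging yields no valid intid, i.e. nothing is pending. */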
    212#define GUEST_ASSERT_IAR_EMPTY()						\
    213do { 										\
    214	uint32_t _intid;							\
    215	_intid = gic_get_and_ack_irq();						\
    216	GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS);			\
    217} while (0)
    218
    219#define CAT_HELPER(a, b) a ## b
    220#define CAT(a, b) CAT_HELPER(a, b)
    221#define PREFIX guest_irq_handler_
    222#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
    223#define GENERATE_GUEST_IRQ_HANDLER(split, lev)					\
    224static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs)			\
    225{										\
    226	guest_irq_generic_handler(split, lev);					\
    227}
    228
    229GENERATE_GUEST_IRQ_HANDLER(0, 0);
    230GENERATE_GUEST_IRQ_HANDLER(0, 1);
    231GENERATE_GUEST_IRQ_HANDLER(1, 0);
    232GENERATE_GUEST_IRQ_HANDLER(1, 1);
    233
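        /* Handler table, indexed as guest_irq_handlers[eoi_split][level_sensitive]. */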
    234static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
    235	{GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
    236	{GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
    237};
    238
    239static void reset_priorities(struct test_args *args)
    240{
    241	int i;
    242
    243	for (i = 0; i < args->nr_irqs; i++)
    244		gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
    245}
    246
    247static void guest_set_irq_line(uint32_t intid, uint32_t level)
    248{
    249	kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
    250}
    251
    252static void test_inject_fail(struct test_args *args,
    253		uint32_t intid, kvm_inject_cmd cmd)
    254{
    255	reset_stats();
    256
    257	_KVM_INJECT(cmd, intid, true);
    258	/* no IRQ to handle on entry */
    259
    260	GUEST_ASSERT_EQ(irq_handled, 0);
    261	GUEST_ASSERT_IAR_EMPTY();
    262}
    263
    264static void guest_inject(struct test_args *args,
    265		uint32_t first_intid, uint32_t num,
    266		kvm_inject_cmd cmd)
    267{
    268	uint32_t i;
    269
    270	reset_stats();
    271
    272	/* Cycle over all priorities to make things more interesting. */
    273	for (i = first_intid; i < num + first_intid; i++)
    274		gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3);
    275
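        	/*
        	 * Mask IRQs (PSTATE.I), inject, then take the interrupts one at a
        	 * time: WFI wakes up on a pending IRQ, unmasking lets the handler
        	 * run, and re-masking returns control to this loop.
        	 */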
    276	asm volatile("msr daifset, #2" : : : "memory");
    277	KVM_INJECT_MULTI(cmd, first_intid, num);
    278
    279	while (irq_handled < num) {
    280		asm volatile("wfi\n"
    281			     "msr daifclr, #2\n"
    282			     /* handle IRQ */
    283			     "msr daifset, #2\n"
    284			     : : : "memory");
    285	}
    286	asm volatile("msr daifclr, #2" : : : "memory");
    287
    288	GUEST_ASSERT_EQ(irq_handled, num);
    289	for (i = first_intid; i < num + first_intid; i++)
    290		GUEST_ASSERT_EQ(irqnr_received[i], 1);
    291	GUEST_ASSERT_IAR_EMPTY();
    292
    293	reset_priorities(args);
    294}
    295
    296/*
     297 * Restore the active state of multiple concurrent IRQs (num IRQs starting
     298 * at first_intid).  This does what a live migration would do on the
    299 * destination side assuming there are some active IRQs that were not
    300 * deactivated yet.
    301 */
    302static void guest_restore_active(struct test_args *args,
    303		uint32_t first_intid, uint32_t num,
    304		kvm_inject_cmd cmd)
    305{
    306	uint32_t prio, intid, ap1r;
    307	int i;
    308
    309	/*
     310	 * Set the priorities of the num IRQs (starting at first_intid) in
     311	 * descending order, so intid+1 can preempt intid.
    312	 */
    313	for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
    314		GUEST_ASSERT(prio >= 0);
    315		intid = i + first_intid;
    316		gic_set_priority(intid, prio);
    317	}
    318
    319	/*
    320	 * In a real migration, KVM would restore all GIC state before running
    321	 * guest code.
    322	 */
    323	for (i = 0; i < num; i++) {
    324		intid = i + first_intid;
    325		KVM_ACTIVATE(cmd, intid);
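        		/* Record one active priority per restored IRQ in ICV_AP1R0_EL1. */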
    326		ap1r = gic_read_ap1r0();
    327		ap1r |= 1U << i;
    328		gic_write_ap1r0(ap1r);
    329	}
    330
    331	/* This is where the "migration" would occur. */
    332
    333	/* finish handling the IRQs starting with the highest priority one. */
    334	for (i = 0; i < num; i++) {
    335		intid = num - i - 1 + first_intid;
    336		gic_set_eoi(intid);
    337		if (args->eoi_split)
    338			gic_set_dir(intid);
    339	}
    340
    341	for (i = 0; i < num; i++)
    342		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
    343	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
    344	GUEST_ASSERT_IAR_EMPTY();
    345}
    346
    347/*
    348 * Polls the IAR until it's not a spurious interrupt.
    349 *
    350 * This function should only be used in test_inject_preemption (with IRQs
    351 * masked).
    352 */
    353static uint32_t wait_for_and_activate_irq(void)
    354{
    355	uint32_t intid;
    356
    357	do {
    358		asm volatile("wfi" : : : "memory");
    359		intid = gic_get_and_ack_irq();
    360	} while (intid == IAR_SPURIOUS);
    361
    362	return intid;
    363}
    364
    365/*
    366 * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
    367 * handle them without handling the actual exceptions.  This is done by masking
    368 * interrupts for the whole test.
    369 */
    370static void test_inject_preemption(struct test_args *args,
    371		uint32_t first_intid, int num,
    372		kvm_inject_cmd cmd)
    373{
    374	uint32_t intid, prio, step = KVM_PRIO_STEPS;
    375	int i;
    376
     377	/* Set the priorities of the num IRQs (starting at first_intid) in
     378	 * descending order, so intid+1 can preempt intid.
     379	 */
    380	for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
    381		GUEST_ASSERT(prio >= 0);
    382		intid = i + first_intid;
    383		gic_set_priority(intid, prio);
    384	}
    385
    386	local_irq_disable();
    387
    388	for (i = 0; i < num; i++) {
    389		uint32_t tmp;
    390		intid = i + first_intid;
    391		KVM_INJECT(cmd, intid);
    392		/* Each successive IRQ will preempt the previous one. */
    393		tmp = wait_for_and_activate_irq();
    394		GUEST_ASSERT_EQ(tmp, intid);
    395		if (args->level_sensitive)
    396			guest_set_irq_line(intid, 0);
    397	}
    398
    399	/* finish handling the IRQs starting with the highest priority one. */
    400	for (i = 0; i < num; i++) {
    401		intid = num - i - 1 + first_intid;
    402		gic_set_eoi(intid);
    403		if (args->eoi_split)
    404			gic_set_dir(intid);
    405	}
    406
    407	local_irq_enable();
    408
    409	for (i = 0; i < num; i++)
    410		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
    411	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
    412	GUEST_ASSERT_IAR_EMPTY();
    413
    414	reset_priorities(args);
    415}
    416
    417static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
    418{
    419	uint32_t nr_irqs = args->nr_irqs;
    420
    421	if (f->sgi) {
    422		guest_inject(args, MIN_SGI, 1, f->cmd);
    423		guest_inject(args, 0, 16, f->cmd);
    424	}
    425
    426	if (f->ppi)
    427		guest_inject(args, MIN_PPI, 1, f->cmd);
    428
    429	if (f->spi) {
    430		guest_inject(args, MIN_SPI, 1, f->cmd);
    431		guest_inject(args, nr_irqs - 1, 1, f->cmd);
    432		guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
    433	}
    434}
    435
    436static void test_injection_failure(struct test_args *args,
    437		struct kvm_inject_desc *f)
    438{
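        	/*
        	 * None of these intids is a valid, configured interrupt: at or above
        	 * nr_irqs, in the special 1020-1023 range, or far out of range.
        	 */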
    439	uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
    440	int i;
    441
    442	for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
    443		test_inject_fail(args, bad_intid[i], f->cmd);
    444}
    445
    446static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
    447{
    448	/*
    449	 * Test up to 4 levels of preemption. The reason is that KVM doesn't
    450	 * currently implement the ability to have more than the number-of-LRs
    451	 * number of concurrently active IRQs. The number of LRs implemented is
    452	 * IMPLEMENTATION DEFINED, however, it seems that most implement 4.
    453	 */
    454	if (f->sgi)
    455		test_inject_preemption(args, MIN_SGI, 4, f->cmd);
    456
    457	if (f->ppi)
    458		test_inject_preemption(args, MIN_PPI, 4, f->cmd);
    459
    460	if (f->spi)
    461		test_inject_preemption(args, MIN_SPI, 4, f->cmd);
    462}
    463
    464static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
    465{
    466	/* Test up to 4 active IRQs. Same reason as in test_preemption. */
    467	if (f->sgi)
    468		guest_restore_active(args, MIN_SGI, 4, f->cmd);
    469
    470	if (f->ppi)
    471		guest_restore_active(args, MIN_PPI, 4, f->cmd);
    472
    473	if (f->spi)
    474		guest_restore_active(args, MIN_SPI, 4, f->cmd);
    475}
    476
    477static void guest_code(struct test_args *args)
    478{
    479	uint32_t i, nr_irqs = args->nr_irqs;
    480	bool level_sensitive = args->level_sensitive;
    481	struct kvm_inject_desc *f, *inject_fns;
    482
    483	gic_init(GIC_V3, 1, dist, redist);
    484
    485	for (i = 0; i < nr_irqs; i++)
    486		gic_irq_enable(i);
    487
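        	/* Set the requested trigger mode (edge or level) for every SPI. */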
    488	for (i = MIN_SPI; i < nr_irqs; i++)
    489		gic_irq_set_config(i, !level_sensitive);
    490
    491	gic_set_eoi_split(args->eoi_split);
    492
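        	/*
        	 * Default every IRQ priority to 0xf0 and set the priority mask to
        	 * 0xf8 so that all configured interrupts can be delivered.
        	 */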
    493	reset_priorities(args);
    494	gic_set_priority_mask(CPU_PRIO_MASK);
    495
    496	inject_fns  = level_sensitive ? inject_level_fns
    497				      : inject_edge_fns;
    498
    499	local_irq_enable();
    500
    501	/* Start the tests. */
    502	for_each_supported_inject_fn(args, inject_fns, f) {
    503		test_injection(args, f);
    504		test_preemption(args, f);
    505		test_injection_failure(args, f);
    506	}
    507
    508	/*
    509	 * Restore the active state of IRQs. This would happen when live
    510	 * migrating IRQs in the middle of being handled.
    511	 */
    512	for_each_supported_activate_fn(args, set_active_fns, f)
    513		test_restore_active(args, f);
    514
    515	GUEST_DONE();
    516}
    517
    518static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
    519			struct test_args *test_args, bool expect_failure)
    520{
    521	int ret;
    522
    523	if (!expect_failure) {
    524		kvm_arm_irq_line(vm, intid, level);
    525	} else {
     526		/* The interface doesn't allow larger intids. */
    527		if (intid > KVM_ARM_IRQ_NUM_MASK)
    528			return;
    529
    530		ret = _kvm_arm_irq_line(vm, intid, level);
    531		TEST_ASSERT(ret != 0 && errno == EINVAL,
    532				"Bad intid %i did not cause KVM_IRQ_LINE "
    533				"error: rc: %i errno: %i", intid, ret, errno);
    534	}
    535}
    536
     537static void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
    538			bool expect_failure)
    539{
    540	if (!expect_failure) {
    541		kvm_irq_set_level_info(gic_fd, intid, level);
    542	} else {
    543		int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
    544		/*
    545		 * The kernel silently fails for invalid SPIs and SGIs (which
     546		 * are not level-sensitive). It only checks that intid does not
     547		 * exceed 1U << 10 (the max reserved SPI). Also, callers
    548		 * are supposed to mask the intid with 0x3ff (1023).
    549		 */
    550		if (intid > VGIC_MAX_RESERVED)
    551			TEST_ASSERT(ret != 0 && errno == EINVAL,
    552				"Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
    553				"error: rc: %i errno: %i", intid, ret, errno);
    554		else
    555			TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
    556				"for intid %i failed, rc: %i errno: %i",
    557				intid, ret, errno);
    558	}
    559}
    560
    561static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
    562		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
    563		bool expect_failure)
    564{
    565	struct kvm_irq_routing *routing;
    566	int ret;
    567	uint64_t i;
    568
    569	assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);
    570
    571	routing = kvm_gsi_routing_create();
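        	/* GSI numbers are SPI-relative: GSI 0 corresponds to intid MIN_SPI. */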
    572	for (i = intid; i < (uint64_t)intid + num; i++)
    573		kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);
    574
    575	if (!expect_failure) {
    576		kvm_gsi_routing_write(vm, routing);
    577	} else {
    578		ret = _kvm_gsi_routing_write(vm, routing);
    579		/* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
    580		if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
    581			TEST_ASSERT(ret != 0 && errno == EINVAL,
    582				"Bad intid %u did not cause KVM_SET_GSI_ROUTING "
    583				"error: rc: %i errno: %i", intid, ret, errno);
    584		else
    585			TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
    586				"for intid %i failed, rc: %i errno: %i",
    587				intid, ret, errno);
    588	}
    589}
    590
    591static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
    592			uint32_t vcpu, bool expect_failure)
    593{
    594	/*
    595	 * Ignore this when expecting failure as invalid intids will lead to
    596	 * either trying to inject SGIs when we configured the test to be
     597	 * level_sensitive (or the reverse), or to injecting large intids
     598	 * that would write above the ISPENDR register space (and we
     599	 * don't want to do that either).
    600	 */
    601	if (!expect_failure)
    602		kvm_irq_write_ispendr(gic_fd, intid, vcpu);
    603}
    604
    605static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
    606		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
    607		bool expect_failure)
    608{
    609	int fd[MAX_SPI];
    610	uint64_t val;
    611	int ret, f;
    612	uint64_t i;
    613
    614	/*
    615	 * There is no way to try injecting an SGI or PPI as the interface
    616	 * starts counting from the first SPI (above the private ones), so just
    617	 * exit.
    618	 */
    619	if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
    620		return;
    621
    622	kvm_set_gsi_routing_irqchip_check(vm, intid, num,
    623			kvm_max_routes, expect_failure);
    624
    625	/*
     626	 * If expect_failure, then just try to inject anyway. These
     627	 * injections will silently fail. In any case, the guest will check
     628	 * that no actual interrupt was injected for those cases.
    629	 */
    630
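        	/*
        	 * Create one eventfd per intid, bind each one to a GSI with
        	 * KVM_IRQFD, then write to the eventfd to trigger the injection.
        	 */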
    631	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
    632		fd[f] = eventfd(0, 0);
    633		TEST_ASSERT(fd[f] != -1,
    634			"eventfd failed, errno: %i\n", errno);
    635	}
    636
    637	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
    638		struct kvm_irqfd irqfd = {
    639			.fd  = fd[f],
    640			.gsi = i - MIN_SPI,
    641		};
    642		assert(i <= (uint64_t)UINT_MAX);
    643		vm_ioctl(vm, KVM_IRQFD, &irqfd);
    644	}
    645
    646	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
    647		val = 1;
    648		ret = write(fd[f], &val, sizeof(uint64_t));
    649		TEST_ASSERT(ret == sizeof(uint64_t),
    650			"Write to KVM_IRQFD failed with ret: %d\n", ret);
    651	}
    652
    653	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
    654		close(fd[f]);
    655}
    656
    657/* handles the valid case: intid=0xffffffff num=1 */
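        /* tmp is a 64-bit counter so the bound check cannot wrap; i holds the
         * 32-bit intid that is actually used by the caller. */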
    658#define for_each_intid(first, num, tmp, i)					\
    659	for ((tmp) = (i) = (first);						\
    660		(tmp) < (uint64_t)(first) + (uint64_t)(num);			\
    661		(tmp)++, (i)++)
    662
    663static void run_guest_cmd(struct kvm_vm *vm, int gic_fd,
    664		struct kvm_inject_args *inject_args,
    665		struct test_args *test_args)
    666{
    667	kvm_inject_cmd cmd = inject_args->cmd;
    668	uint32_t intid = inject_args->first_intid;
    669	uint32_t num = inject_args->num;
    670	int level = inject_args->level;
    671	bool expect_failure = inject_args->expect_failure;
    672	uint64_t tmp;
    673	uint32_t i;
    674
    675	/* handles the valid case: intid=0xffffffff num=1 */
    676	assert(intid < UINT_MAX - num || num == 1);
    677
    678	switch (cmd) {
    679	case KVM_INJECT_EDGE_IRQ_LINE:
    680		for_each_intid(intid, num, tmp, i)
    681			kvm_irq_line_check(vm, i, 1, test_args,
    682					expect_failure);
    683		for_each_intid(intid, num, tmp, i)
    684			kvm_irq_line_check(vm, i, 0, test_args,
    685					expect_failure);
    686		break;
    687	case KVM_SET_IRQ_LINE:
    688		for_each_intid(intid, num, tmp, i)
    689			kvm_irq_line_check(vm, i, level, test_args,
    690					expect_failure);
    691		break;
    692	case KVM_SET_IRQ_LINE_HIGH:
    693		for_each_intid(intid, num, tmp, i)
    694			kvm_irq_line_check(vm, i, 1, test_args,
    695					expect_failure);
    696		break;
    697	case KVM_SET_LEVEL_INFO_HIGH:
    698		for_each_intid(intid, num, tmp, i)
    699			kvm_irq_set_level_info_check(gic_fd, i, 1,
    700					expect_failure);
    701		break;
    702	case KVM_INJECT_IRQFD:
    703		kvm_routing_and_irqfd_check(vm, intid, num,
    704					test_args->kvm_max_routes,
    705					expect_failure);
    706		break;
    707	case KVM_WRITE_ISPENDR:
    708		for (i = intid; i < intid + num; i++)
    709			kvm_irq_write_ispendr_check(gic_fd, i,
    710					VCPU_ID, expect_failure);
    711		break;
    712	case KVM_WRITE_ISACTIVER:
    713		for (i = intid; i < intid + num; i++)
    714			kvm_irq_write_isactiver(gic_fd, i, VCPU_ID);
    715		break;
    716	default:
    717		break;
    718	}
    719}
    720
    721static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
    722		struct kvm_inject_args *args)
    723{
    724	struct kvm_inject_args *kvm_args_hva;
    725	vm_vaddr_t kvm_args_gva;
    726
    727	kvm_args_gva = uc->args[1];
    728	kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
    729	memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
    730}
    731
    732static void print_args(struct test_args *args)
    733{
    734	printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
    735			args->nr_irqs, args->level_sensitive,
    736			args->eoi_split);
    737}
    738
    739static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
    740{
    741	struct ucall uc;
    742	int gic_fd;
    743	struct kvm_vm *vm;
    744	struct kvm_inject_args inject_args;
    745	vm_vaddr_t args_gva;
    746
    747	struct test_args args = {
    748		.nr_irqs = nr_irqs,
    749		.level_sensitive = level_sensitive,
    750		.eoi_split = eoi_split,
    751		.kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
    752		.kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
    753	};
    754
    755	print_args(&args);
    756
    757	vm = vm_create_default(VCPU_ID, 0, guest_code);
    758	ucall_init(vm, NULL);
    759
    760	vm_init_descriptor_tables(vm);
    761	vcpu_init_descriptor_tables(vm, VCPU_ID);
    762
    763	/* Setup the guest args page (so it gets the args). */
    764	args_gva = vm_vaddr_alloc_page(vm);
    765	memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
    766	vcpu_args_set(vm, 0, 1, args_gva);
    767
    768	gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
    769			GICD_BASE_GPA, GICR_BASE_GPA);
    770	if (gic_fd < 0) {
    771		print_skip("Failed to create vgic-v3, skipping");
    772		exit(KSFT_SKIP);
    773	}
    774
    775	vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
    776		guest_irq_handlers[args.eoi_split][args.level_sensitive]);
    777
    778	while (1) {
    779		vcpu_run(vm, VCPU_ID);
    780
    781		switch (get_ucall(vm, VCPU_ID, &uc)) {
    782		case UCALL_SYNC:
    783			kvm_inject_get_call(vm, &uc, &inject_args);
    784			run_guest_cmd(vm, gic_fd, &inject_args, &args);
    785			break;
    786		case UCALL_ABORT:
    787			TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx",
    788					(const char *)uc.args[0],
    789					__FILE__, uc.args[1], uc.args[2], uc.args[3]);
    790			break;
    791		case UCALL_DONE:
    792			goto done;
    793		default:
    794			TEST_FAIL("Unknown ucall %lu", uc.cmd);
    795		}
    796	}
    797
    798done:
    799	close(gic_fd);
    800	kvm_vm_free(vm);
    801}
    802
    803static void help(const char *name)
    804{
    805	printf(
    806	"\n"
    807	"usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name);
     808	printf(" -n: specify number of IRQs to set up the vgic with. "
    809		"It has to be a multiple of 32 and between 64 and 1024.\n");
     810	printf(" -e: if 1 then interrupt deactivation is done with a separate "
     811		"write to DIR after the EOI write.\n");
    812	printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
    813	puts("");
    814	exit(1);
    815}
    816
    817int main(int argc, char **argv)
    818{
    819	uint32_t nr_irqs = 64;
    820	bool default_args = true;
    821	bool level_sensitive = false;
    822	int opt;
    823	bool eoi_split = false;
    824
    825	/* Tell stdout not to buffer its content */
    826	setbuf(stdout, NULL);
    827
    828	while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
    829		switch (opt) {
    830		case 'n':
    831			nr_irqs = atoi(optarg);
    832			if (nr_irqs > 1024 || nr_irqs % 32)
    833				help(argv[0]);
    834			break;
    835		case 'e':
    836			eoi_split = (bool)atoi(optarg);
    837			default_args = false;
    838			break;
    839		case 'l':
    840			level_sensitive = (bool)atoi(optarg);
    841			default_args = false;
    842			break;
    843		case 'h':
    844		default:
    845			help(argv[0]);
    846			break;
    847		}
    848	}
    849
    850	/*
     851	 * If the user only specified nr_irqs (or nothing at all), then run all
     852	 * eoi_split/level_sensitive combinations.
    853	 */
    854	if (default_args) {
    855		test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
    856		test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
    857		test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
    858		test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
    859	} else {
    860		test_vgic(nr_irqs, level_sensitive, eoi_split);
    861	}
    862
    863	return 0;
    864}