cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vgic-v3.c (20010B)


// SPDX-License-Identifier: GPL-2.0-only

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool dir_trap;
static bool gicv4_enable;

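/*
 * Request an underflow maintenance interrupt (ICH_HCR_EL2.UIE), which is
 * raised once no more than one list register entry remains valid, so that
 * further pending interrupts can be queued.
 */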
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

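/*
 * An LR signals an EOI maintenance interrupt when it no longer holds any
 * pending/active state, had the EOI bit set, and does not map a hardware
 * interrupt.
 */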
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}

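/*
 * Fold the state left in the list registers back into the software model
 * of each interrupt after running the vcpu, then reset the LR bookkeeping.
 */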
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < cpuif->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;
		bool deactivated;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit, note deactivation */
		deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/* Handle resampling for mapped interrupts if required */
		vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}

/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC.  We therefore lower the line
	 * level here, so that we can take new virtual IRQs.  See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

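/* Pack the generic vgic_vmcr representation into the ICH_VMCR_EL2 layout */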
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE==1, the
		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcr = ICH_VMCR_FIQ_EN_MASK;
	}

	vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
	vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
	vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
	vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
	vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
	vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

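/* Unpack the ICH_VMCR_EL2 layout back into the generic vgic_vmcr fields */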
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
			ICH_VMCR_ACK_CTL_SHIFT;
		vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
			ICH_VMCR_FIQ_EN_SHIFT;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE==1, the
		 * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr  = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr  = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

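/*
 * Reset the per-vcpu GICv3 CPU interface: clear VMCR, set the SRE and
 * PENDBASER defaults according to the emulated model, cache the ID and
 * priority widths from ICH_VTR_EL2, and enable the interface together
 * with any configured trap bits.
 */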
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
	if (dir_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TDIR;
}

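/*
 * Read the pending bit for an LPI from the guest's pending table, latch
 * it into the software model (queueing the interrupt if needed), and
 * clear the consumed bit in guest memory.
 */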
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The deactivation of the doorbell interrupt will trigger the
 * unmapping of the associated vPE.
 */
static void unmap_all_vpes(struct vgic_dist *dist)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	}
}

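/*
 * Conversely, re-activating the doorbell interrupts maps the vPEs again.
 */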
static void map_all_vpes(struct vgic_dist *dist)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
		irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
	}
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	bool vlpi_avail = false;
	int ret = 0;
	u8 val;

	if (unlikely(!vgic_initialized(kvm)))
		return -ENXIO;

	/*
	 * A preparation for getting any VLPI states.
	 * The above vgic initialized check also ensures that the allocation
	 * and enabling of the doorbells have already been done.
	 */
	if (kvm_vgic_global_state.has_gicv4_1) {
		unmap_all_vpes(dist);
		vlpi_avail = true;
	}

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool is_pending;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				goto out;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);

		is_pending = irq->pending_latch;

		if (irq->hw && vlpi_avail)
			vgic_v4_get_vlpi_state(irq, &is_pending);

		if (stored == is_pending)
			continue;

		if (is_pending)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			goto out;
	}

out:
	if (vlpi_avail)
		map_all_vpes(dist);

	return ret;
}

/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
			(base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		size_t sz = vgic_v3_rd_region_size(kvm, rdreg);

		if (vgic_check_iorange(kvm, VGIC_ADDR_UNDEF,
				       rdreg->base, SZ_64K, sz))
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}

/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * The stride between redistributors is 0 and regions are filled in index order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}

struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}

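/*
 * Final checks before the guest can use the VGIC: all redistributor and
 * distributor base addresses must be set and must not overlap, the VGIC
 * must have been initialized by userspace, and the distributor MMIO
 * device is registered here.
 */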
int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	unsigned long c;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %ld redistributor base not set\n", c);
			return -ENXIO;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_debug("Need to set vgic distributor addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_debug("VGIC redist and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm)) {
		return -EBUSY;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		return ret;
	}

	if (kvm_vgic_global_state.has_gicv4_1)
		vgic_v4_configure_vsgis(kvm);

	return 0;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

static const struct midr_range broken_seis[] = {
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
	MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
	{},
};

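/*
 * True when the CPU interface advertises SEIS (locally generated SErrors)
 * but the implementation is on the list of cores where that is broken.
 */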
static bool vgic_v3_broken_seis(void)
{
	return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
		is_midr_in_range_list(read_cpuid_id(), broken_seis));
}

/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv3 has been probed successfully, returns an error code
 * otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
	bool has_v2;
	int ret;

	has_v2 = ich_vtr_el2 >> 63;
	ich_vtr_el2 = (u32)ich_vtr_el2;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
		kvm_info("GICv4%s support %sabled\n",
			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
			 gicv4_enable ? "en" : "dis");
	}

	kvm_vgic_global_state.vcpu_base = 0;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
	} else if (!has_v2) {
		pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
	} else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}

	if (vgic_v3_broken_seis()) {
		kvm_info("GICv3 with broken locally generated SEI\n");

		kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
		group0_trap = true;
		group1_trap = true;
		if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
			dir_trap = true;
		else
			common_trap = true;
	}

	if (group0_trap || group1_trap || common_trap || dir_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C"  : "",
			 dir_trap    ? "D"  : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}

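/* Restore the GICv3 CPU interface state when the vcpu is scheduled in */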
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_activate_traps(cpu_if);

	WARN_ON(vgic_v4_load(vcpu));
}

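/*
 * Snapshot ICH_VMCR_EL2 back into the shadow copy. Only done when the
 * guest uses the sysreg interface (SRE set); otherwise VMCR is handled
 * in the world switch, as noted in vgic_v3_load().
 */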
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}

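/* Save the GICv3 CPU interface state when the vcpu is scheduled out */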
void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	WARN_ON(vgic_v4_put(vcpu, false));

	vgic_v3_vmcr_sync(vcpu);

	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_deactivate_traps(cpu_if);
}