cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vgic-v2.c (13000B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

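/* Write a single GICH list register via the mapped control interface. */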
static inline void vgic_v2_write_lr(int lr, u32 val)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;

	writel_relaxed(val, base + GICH_LR0 + (lr * 4));
}

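/* Zero all implemented list registers at probe time. */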
void vgic_v2_init_lrs(void)
{
	int i;

	for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_v2_write_lr(i, 0);
}

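/*
 * Enable the underflow maintenance interrupt, so we get a chance to
 * refill the list registers once they are (almost) empty.
 */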
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr |= GICH_HCR_UIE;
}

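/*
 * An LR signals an EOI maintenance interrupt when it holds no state
 * (invalid), requested an EOI notification, and is not a HW interrupt.
 */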
static bool lr_signals_eoi_mi(u32 lr_val)
{
	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
	       !(lr_val & GICH_LR_HW);
}

/*
 * transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line-level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
		struct vgic_irq *irq;
		bool deactivated;

		/* Extract the source vCPU id from the LR */
		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
		cpuid &= 7;

		/* Notify fds when the guest EOI'ed a level-triggered SPI */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit, note deactivation */
		deactivated = irq->active && !(val & GICH_LR_ACTIVE_BIT);
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

		if (irq->active && vgic_irq_is_sgi(intid))
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & GICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid))
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
			irq->pending_latch = false;

		/* Handle resampling for mapped interrupts if required */
		vgic_irq_handle_resampling(irq, deactivated, val & GICH_LR_PENDING_BIT);

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}

/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = irq->intid;
	bool allow_pending = true;

	if (irq->active) {
		val |= GICH_LR_ACTIVE_BIT;
		if (vgic_irq_is_sgi(irq->intid))
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= GICH_LR_EOI;
		}
	}

	if (irq->group)
		val |= GICH_LR_GROUP1;

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= GICH_LR_HW;
		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= GICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= GICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= GICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC.  We therefore lower the line
	 * level here, so that we can take new virtual IRQs.  See
	 * vgic_v2_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
		irq->line_level = false;

	/* The GICv2 LR only holds five bits of priority. */
	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}

void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}

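/*
 * Pack the software view of the VMCR (struct vgic_vmcr) into the
 * hardware GICH_VMCR layout; vgic_v2_get_vmcr() performs the inverse.
 * Only the top five bits of the priority mask are kept, so e.g. a
 * PMR of 0x80 is stored as 0x10 in the PRIMASK field.
 */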
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
		GICH_VMCR_ENABLE_GRP0_MASK;
	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
		GICH_VMCR_ENABLE_GRP1_MASK;
	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
		GICH_VMCR_ACK_CTL_MASK;
	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
		GICH_VMCR_FIQ_EN_MASK;
	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
		GICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
		GICH_VMCR_EOI_MODE_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
		GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
		GICH_VMCR_BINPOINT_MASK;
	vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
		 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
		GICH_VMCR_ENABLE_GRP0_SHIFT;
	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
		GICH_VMCR_ENABLE_GRP1_SHIFT;
	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
		GICH_VMCR_ACK_CTL_SHIFT;
	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
		GICH_VMCR_FIQ_EN_SHIFT;
	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
		GICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
		GICH_VMCR_EOI_MODE_SHIFT;

	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
			GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr  = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
			GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr  = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
			GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}

void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
		return false;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
		return false;

	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
		return true;

	return false;
}

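/*
 * Register the distributor MMIO device with the guest and, unless the
 * CPU interface has to be trapped, map the hardware GICV frame into
 * the guest at the configured CPU interface base address.
 */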
int vgic_v2_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_debug("Need to set vgic cpu and dist addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
		kvm_debug("VGIC CPU and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to initialize VGIC dynamic data structures\n");
		return ret;
	}

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
	if (ret) {
		kvm_err("Unable to register VGIC MMIO regions\n");
		return ret;
	}

	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
					    KVM_VGIC_V2_CPU_SIZE, true);
		if (ret) {
			kvm_err("Unable to remap VGIC CPU to VCPU\n");
			return ret;
		}
	}

	return 0;
}

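/* When enabled, guest accesses to the GICV frame are trapped and emulated. */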
DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a VGICv2 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv2 has been probed successfully, returns an error code
 * otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
	int ret;
	u32 vtr;

	if (is_protected_kvm_enabled()) {
		kvm_err("GICv2 not supported in protected mode\n");
		return -ENXIO;
	}

	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
	}

	if (!PAGE_ALIGNED(info->vcpu.start) ||
	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
		kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");

		ret = create_hyp_io_mappings(info->vcpu.start,
					     resource_size(&info->vcpu),
					     &kvm_vgic_global_state.vcpu_base_va,
					     &kvm_vgic_global_state.vcpu_hyp_va);
		if (ret) {
			kvm_err("Cannot map GICV into hyp\n");
			goto out;
		}

		static_branch_enable(&vgic_v2_cpuif_trap);
	}

	ret = create_hyp_io_mappings(info->vctrl.start,
				     resource_size(&info->vctrl),
				     &kvm_vgic_global_state.vctrl_base,
				     &kvm_vgic_global_state.vctrl_hyp);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out;
	}

	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
	if (ret) {
		kvm_err("Cannot register GICv2 KVM device\n");
		goto out;
	}

	kvm_vgic_global_state.can_emulate_gicv2 = true;
	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
	kvm_vgic_global_state.type = VGIC_V2;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

	return 0;
out:
	if (kvm_vgic_global_state.vctrl_base)
		iounmap(kvm_vgic_global_state.vctrl_base);
	if (kvm_vgic_global_state.vcpu_base_va)
		iounmap(kvm_vgic_global_state.vcpu_base_va);

	return ret;
}

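/*
 * Read the live list registers back into the shadow copy, using the
 * empty-LR status registers (GICH_ELRSR) to skip reads of empty ones,
 * then clear them in hardware.
 */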
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = cpu_if->used_lrs;
	u64 elrsr;
	int i;

	elrsr = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(used_lrs > 32))
		elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;

	for (i = 0; i < used_lrs; i++) {
		if (elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

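/* On VM exit, save the LR state and disable the hypervisor interface. */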
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;

	if (!base)
		return;

	if (used_lrs) {
		save_lrs(vcpu, base);
		writel_relaxed(0, base + GICH_HCR);
	}
}

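/* Before VM entry, restore GICH_HCR and replay the shadow list registers. */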
void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = cpu_if->used_lrs;
	int i;

	if (!base)
		return;

	if (used_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		for (i = 0; i < used_lrs; i++) {
			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}
}

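/* Restore the VMCR and active priority register when the vcpu is loaded. */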
void vgic_v2_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	writel_relaxed(cpu_if->vgic_vmcr,
		       kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	writel_relaxed(cpu_if->vgic_apr,
		       kvm_vgic_global_state.vctrl_base + GICH_APR);
}

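/* Resync the shadow VMCR from the hardware copy. */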
void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
}

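/* On vcpu put, sync the VMCR and save the active priority register. */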
void vgic_v2_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	vgic_v2_vmcr_sync(vcpu);
	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}