cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vgic-sys-reg-v3.c (7798B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC system registers handling functions for AArch64 mode
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include "vgic/vgic.h"
#include "sys_regs.h"

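/*
 * ICC_CTLR_EL1: writes may only narrow PRIbits/IDbits relative to the
 * current vGIC configuration and must match the hardware's SEIS/A3V
 * bits from ICH_VTR_EL2; CBPR and EOImode are propagated into the
 * shadow VMCR. Reads reassemble the register value from the same state.
 */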
static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;
	u64 val;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		val = p->regval;

		/*
		 * Disallow restoring VM state if it is not supported by
		 * this hardware.
		 */
		host_pri_bits = ((val & ICC_CTLR_EL1_PRI_BITS_MASK) >>
				 ICC_CTLR_EL1_PRI_BITS_SHIFT) + 1;
		if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
			return false;

		vgic_v3_cpu->num_pri_bits = host_pri_bits;

		host_id_bits = (val & ICC_CTLR_EL1_ID_BITS_MASK) >>
				ICC_CTLR_EL1_ID_BITS_SHIFT;
		if (host_id_bits > vgic_v3_cpu->num_id_bits)
			return false;

		vgic_v3_cpu->num_id_bits = host_id_bits;

		host_seis = ((kvm_vgic_global_state.ich_vtr_el2 &
			     ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT);
		seis = (val & ICC_CTLR_EL1_SEIS_MASK) >>
			ICC_CTLR_EL1_SEIS_SHIFT;
		if (host_seis != seis)
			return false;

		host_a3v = ((kvm_vgic_global_state.ich_vtr_el2 &
			    ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT);
		a3v = (val & ICC_CTLR_EL1_A3V_MASK) >> ICC_CTLR_EL1_A3V_SHIFT;
		if (host_a3v != a3v)
			return false;

		/*
		 * Set the VMCR.CTLR fields using the ICC_CTLR_EL1 layout;
		 * vgic_set_vmcr() converts them to the ICH_VMCR layout.
		 */
		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		val = 0;
		val |= (vgic_v3_cpu->num_pri_bits - 1) <<
			ICC_CTLR_EL1_PRI_BITS_SHIFT;
		val |= vgic_v3_cpu->num_id_bits << ICC_CTLR_EL1_ID_BITS_SHIFT;
		val |= ((kvm_vgic_global_state.ich_vtr_el2 &
			ICH_VTR_SEIS_MASK) >> ICH_VTR_SEIS_SHIFT) <<
			ICC_CTLR_EL1_SEIS_SHIFT;
		val |= ((kvm_vgic_global_state.ich_vtr_el2 &
			ICH_VTR_A3V_MASK) >> ICH_VTR_A3V_SHIFT) <<
			ICC_CTLR_EL1_A3V_SHIFT;
		/*
		 * The VMCR.CTLR value is already in ICC_CTLR_EL1 layout,
		 * so extract it directly using the ICC_CTLR_EL1 definitions.
		 */
		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;

		p->regval = val;
	}

	return true;
}

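/*
 * ICC_PMR_EL1: the priority mask lives in the shadow VMCR rather than
 * in hardware, so both directions go through vgic_{get,set}_vmcr().
 */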
static bool access_gic_pmr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.pmr = (p->regval & ICC_PMR_EL1_MASK) >> ICC_PMR_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.pmr << ICC_PMR_EL1_SHIFT) & ICC_PMR_EL1_MASK;
	}

	return true;
}

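/*
 * ICC_BPR0_EL1: Group 0 binary point, mirrored in the shadow VMCR.
 */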
static bool access_gic_bpr0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.bpr = (p->regval & ICC_BPR0_EL1_MASK) >>
			    ICC_BPR0_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.bpr << ICC_BPR0_EL1_SHIFT) &
			     ICC_BPR0_EL1_MASK;
	}

	return true;
}

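/*
 * ICC_BPR1_EL1: backed by the "abpr" field of the shadow VMCR. When
 * CBPR is set the register aliases BPR0, so writes are ignored and
 * reads return BPR0 + 1, saturated at 7, as the architecture requires.
 */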
static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	if (!p->is_write)
		p->regval = 0;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr) {
		if (p->is_write) {
			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
				     ICC_BPR1_EL1_SHIFT;
			vgic_set_vmcr(vcpu, &vmcr);
		} else {
			p->regval = (vmcr.abpr << ICC_BPR1_EL1_SHIFT) &
				     ICC_BPR1_EL1_MASK;
		}
	} else {
		if (!p->is_write)
			p->regval = min((vmcr.bpr + 1), 7U);
	}

	return true;
}

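/*
 * ICC_IGRPEN0_EL1: Group 0 interrupt enable, mirrored in the shadow VMCR.
 */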
static bool access_gic_grpen0(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.grpen0 = (p->regval & ICC_IGRPEN0_EL1_MASK) >>
			       ICC_IGRPEN0_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.grpen0 << ICC_IGRPEN0_EL1_SHIFT) &
			     ICC_IGRPEN0_EL1_MASK;
	}

	return true;
}

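/*
 * ICC_IGRPEN1_EL1: Group 1 interrupt enable, mirrored in the shadow VMCR.
 */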
static bool access_gic_grpen1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (p->is_write) {
		vmcr.grpen1 = (p->regval & ICC_IGRPEN1_EL1_MASK) >>
			       ICC_IGRPEN1_EL1_SHIFT;
		vgic_set_vmcr(vcpu, &vmcr);
	} else {
		p->regval = (vmcr.grpen1 << ICC_IGRPEN1_EL1_SHIFT) &
			     ICC_IGRPEN1_EL1_MASK;
	}

	return true;
}

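/*
 * Read or write one of the shadow active-priority registers: "apr"
 * selects the AP0Rn or AP1Rn bank, "idx" the register within it.
 */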
static void vgic_v3_access_apr_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;
	uint32_t *ap_reg;

	if (apr)
		ap_reg = &vgicv3->vgic_ap1r[idx];
	else
		ap_reg = &vgicv3->vgic_ap0r[idx];

	if (p->is_write)
		*ap_reg = p->regval;
	else
		p->regval = *ap_reg;
}

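/*
 * Common handler for the ICC_AP0Rn_EL1/ICC_AP1Rn_EL1 accessors: Op2
 * encodes the register index, and indices beyond what the vGIC
 * implements are rejected, reading as zero.
 */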
static bool access_gic_aprn(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r, u8 apr)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		goto err;

	vgic_v3_access_apr_reg(vcpu, p, apr, idx);
	return true;
err:
	if (!p->is_write)
		p->regval = 0;

	return false;
}

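/* Thin wrappers selecting the AP0Rn or AP1Rn bank, respectively. */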
static bool access_gic_ap0r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	return access_gic_aprn(vcpu, p, r, 0);
}

static bool access_gic_ap1r(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	return access_gic_aprn(vcpu, p, r, 1);
}

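/*
 * ICC_SRE_EL1: KVM only supports the system-register CPU interface,
 * so writes that would clear the SRE bit are refused; reads return
 * the cached vgic_sre value.
 */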
static bool access_gic_sre(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/* Validate SRE bit */
	if (p->is_write) {
		if (!(p->regval & ICC_SRE_EL1_SRE))
			return false;
	} else {
		p->regval = vgicv3->vgic_sre;
	}

	return true;
}

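/*
 * Table of ICC system registers exposed to userspace, looked up by
 * encoding via find_reg_by_id() in the helpers below.
 */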
static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
	{ SYS_DESC(SYS_ICC_PMR_EL1), access_gic_pmr },
	{ SYS_DESC(SYS_ICC_BPR0_EL1), access_gic_bpr0 },
	{ SYS_DESC(SYS_ICC_AP0R0_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R1_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R2_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP0R3_EL1), access_gic_ap0r },
	{ SYS_DESC(SYS_ICC_AP1R0_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R1_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R2_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_AP1R3_EL1), access_gic_ap1r },
	{ SYS_DESC(SYS_ICC_BPR1_EL1), access_gic_bpr1 },
	{ SYS_DESC(SYS_ICC_CTLR_EL1), access_gic_ctlr },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1), access_gic_grpen0 },
	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1), access_gic_grpen1 },
};

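/*
 * Report whether a userspace attribute ID maps to a register in the
 * table above: 0 if a handler exists, -ENXIO otherwise.
 */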
int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
				u64 *reg)
{
	struct sys_reg_params params;
	u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;

	params.regval = *reg;
	params.is_write = is_write;

	if (find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
			      ARRAY_SIZE(gic_v3_icc_reg_descs)))
		return 0;

	return -ENXIO;
}

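/*
 * Userspace get/set of one CPU interface register: locate the handler
 * in the table and invoke it, copying the value in the appropriate
 * direction.
 */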
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
				u64 *reg)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	u64 sysreg = (id & KVM_DEV_ARM_VGIC_SYSREG_MASK) | KVM_REG_SIZE_U64;

	if (is_write)
		params.regval = *reg;
	params.is_write = is_write;

	r = find_reg_by_id(sysreg, &params, gic_v3_icc_reg_descs,
			   ARRAY_SIZE(gic_v3_icc_reg_descs));
	if (!r)
		return -ENXIO;

	if (!r->access(vcpu, &params, r))
		return -EINVAL;

	if (!is_write)
		*reg = params.regval;

	return 0;
}