cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pmu.c (5301B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <Andrew.Murray@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);

/*
 * Given the perf event attributes and system type, determine
 * if we are going to need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
	/*
	 * With VHE the guest kernel runs at EL1 and the host at EL2:
	 * if user (EL0) is excluded, there is no reason to switch
	 * counters.
	 */
	if (has_vhe() && attr->exclude_user)
		return false;

	/* Only switch if attributes are different */
	return (attr->exclude_host != attr->exclude_guest);
}
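
/*
 * Worked example (illustrative, not part of the original file): a perf
 * event meant to count only while the host runs is created with
 * exclude_host = 0 and exclude_guest = 1; the two flags differ, so
 * kvm_pmu_switch_needed() returns true.  An event with both flags
 * clear counts everywhere and needs no switch.  On VHE, an event that
 * also sets exclude_user never counts at EL0 anyway, so no switch is
 * needed regardless of the host/guest flags.
 */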
     28
     29struct kvm_pmu_events *kvm_get_pmu_events(void)
     30{
     31	return this_cpu_ptr(&kvm_pmu_events);
     32}
     33
     34/*
     35 * Add events to track that we may want to switch at guest entry/exit
     36 * time.
     37 */
     38void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
     39{
     40	struct kvm_pmu_events *pmu = kvm_get_pmu_events();
     41
     42	if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr))
     43		return;
     44
     45	if (!attr->exclude_host)
     46		pmu->events_host |= set;
     47	if (!attr->exclude_guest)
     48		pmu->events_guest |= set;
     49}
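
/*
 * Example (a sketch, not in the original file): tracking counter 2 for
 * host-only counting.
 *
 *	struct perf_event_attr attr = {
 *		.exclude_host  = 0,
 *		.exclude_guest = 1,
 *	};
 *	kvm_set_pmu_events(BIT(2), &attr);
 *
 * This sets bit 2 in events_host only, so the restore helpers below
 * will later enable EL0 counting for counter 2 when entering the host
 * and disable it when entering the guest.
 */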

/*
 * Stop tracking events
 */
void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_pmu_events *pmu = kvm_get_pmu_events();

	if (!kvm_arm_support_pmu_v3() || !pmu)
		return;

	pmu->events_host &= ~clr;
	pmu->events_guest &= ~clr;
}
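
/*
 * Example (sketch): kvm_clr_pmu_events(BIT(2)) stops tracking counter
 * 2 on this CPU for both host and guest, regardless of how it was
 * added.
 */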

#define PMEVTYPER_READ_CASE(idx)				\
	case idx:						\
		return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)				\
	case idx:						\
		write_sysreg(val, pmevtyper##idx##_el0);	\
		break

#define PMEVTYPER_CASES(readwrite)				\
	PMEVTYPER_##readwrite##_CASE(0);			\
	PMEVTYPER_##readwrite##_CASE(1);			\
	PMEVTYPER_##readwrite##_CASE(2);			\
	PMEVTYPER_##readwrite##_CASE(3);			\
	PMEVTYPER_##readwrite##_CASE(4);			\
	PMEVTYPER_##readwrite##_CASE(5);			\
	PMEVTYPER_##readwrite##_CASE(6);			\
	PMEVTYPER_##readwrite##_CASE(7);			\
	PMEVTYPER_##readwrite##_CASE(8);			\
	PMEVTYPER_##readwrite##_CASE(9);			\
	PMEVTYPER_##readwrite##_CASE(10);			\
	PMEVTYPER_##readwrite##_CASE(11);			\
	PMEVTYPER_##readwrite##_CASE(12);			\
	PMEVTYPER_##readwrite##_CASE(13);			\
	PMEVTYPER_##readwrite##_CASE(14);			\
	PMEVTYPER_##readwrite##_CASE(15);			\
	PMEVTYPER_##readwrite##_CASE(16);			\
	PMEVTYPER_##readwrite##_CASE(17);			\
	PMEVTYPER_##readwrite##_CASE(18);			\
	PMEVTYPER_##readwrite##_CASE(19);			\
	PMEVTYPER_##readwrite##_CASE(20);			\
	PMEVTYPER_##readwrite##_CASE(21);			\
	PMEVTYPER_##readwrite##_CASE(22);			\
	PMEVTYPER_##readwrite##_CASE(23);			\
	PMEVTYPER_##readwrite##_CASE(24);			\
	PMEVTYPER_##readwrite##_CASE(25);			\
	PMEVTYPER_##readwrite##_CASE(26);			\
	PMEVTYPER_##readwrite##_CASE(27);			\
	PMEVTYPER_##readwrite##_CASE(28);			\
	PMEVTYPER_##readwrite##_CASE(29);			\
	PMEVTYPER_##readwrite##_CASE(30)
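
/*
 * Expansion example (illustrative): PMEVTYPER_CASES(READ) pastes one
 * case per event counter, e.g. for index 3:
 *
 *	case 3:
 *		return read_sysreg(pmevtyper3_el0);
 *
 * and PMEVTYPER_CASES(WRITE) correspondingly:
 *
 *	case 3:
 *		write_sysreg(val, pmevtyper3_el0);
 *		break;
 */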

/*
 * Read a value direct from PMEVTYPER<idx> where idx is 0-30
 * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
	switch (idx) {
	PMEVTYPER_CASES(READ);
	case ARMV8_PMU_CYCLE_IDX:
		return read_sysreg(pmccfiltr_el0);
	default:
		WARN_ON(1);
	}

	return 0;
}

/*
 * Write a value direct to PMEVTYPER<idx> where idx is 0-30
 * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
	switch (idx) {
	PMEVTYPER_CASES(WRITE);
	case ARMV8_PMU_CYCLE_IDX:
		write_sysreg(val, pmccfiltr_el0);
		break;
	default:
		WARN_ON(1);
	}
}
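
/*
 * Note (an assumption, not stated in the original file): accessing the
 * PMEVTYPER<n>_EL0 registers directly avoids the architectural
 * select-then-access sequence via PMSELR_EL0/PMXEVTYPER_EL0, which
 * would otherwise look roughly like:
 *
 *	write_sysreg(idx, pmselr_el0);
 *	isb();
 *	typer = read_sysreg(pmxevtyper_el0);
 *
 * The direct form needs no intervening barrier or saved selector.
 */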

/*
 * Modify ARMv8 PMU events to include EL0 counting
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer &= ~ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * Modify ARMv8 PMU events to exclude EL0 counting
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer |= ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}
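
/*
 * Worked example (sketch): with events = 0x5, for_each_set_bit()
 * visits counters 0 and 2, so kvm_vcpu_pmu_disable_el0(0x5) sets
 * ARMV8_PMU_EXCLUDE_EL0 in PMEVTYPER0_EL0 and PMEVTYPER2_EL0; with
 * events = BIT(ARMV8_PMU_CYCLE_IDX) it updates PMCCFILTR_EL0 for the
 * cycle counter instead.
 */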

/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This is called from both vcpu_{load,put} and the sysreg handling.
 * Since the latter is preemptible, special care must be taken to
 * disable preemption.
 */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	preempt_disable();
	pmu = kvm_get_pmu_events();
	events_guest = pmu->events_guest;
	events_host = pmu->events_host;

	kvm_vcpu_pmu_enable_el0(events_guest);
	kvm_vcpu_pmu_disable_el0(events_host);
	preempt_enable();
}
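
/*
 * Note (illustrative): preemption is disabled above so that the
 * per-CPU event masks are read and the PMEVTYPER registers are written
 * on the same physical CPU; a migration in between would program the
 * wrong core's counters.
 */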

/*
 * On VHE ensure that only host events have EL0 counting enabled
 */
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	pmu = kvm_get_pmu_events();
	events_guest = pmu->events_guest;
	events_host = pmu->events_host;

	kvm_vcpu_pmu_enable_el0(events_host);
	kvm_vcpu_pmu_disable_el0(events_guest);
}
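
/*
 * Note (an assumption, not stated in this file): unlike
 * kvm_vcpu_pmu_restore_guest() above, no explicit preempt_disable()
 * is taken here, presumably because this helper runs only from the
 * vcpu_put path, where preemption is already disabled.
 */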