cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

arm_pmu.h (5201B)


      1/* SPDX-License-Identifier: GPL-2.0-only */
      2/*
      3 * Copyright (C) 2015 Linaro Ltd.
      4 * Author: Shannon Zhao <shannon.zhao@linaro.org>
      5 */
      6
      7#ifndef __ASM_ARM_KVM_PMU_H
      8#define __ASM_ARM_KVM_PMU_H
      9
     10#include <linux/perf_event.h>
     11#include <asm/perf_event.h>
     12
/* The cycle counter occupies the highest counter index (PMCCNTR_EL0) */
#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)
/* Number of counter pairs for 64-bit event chaining; rounds up for odd counts */
#define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
     15
     16#ifdef CONFIG_HW_PERF_EVENTS
     17
/* Per-counter state: ties one guest PMU counter to a backing host perf event */
struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;	/* backing host perf event; NOTE(review): presumably NULL while not programmed — confirm against pmu-emul code */
};
     22
/*
 * PMU events tracked separately for host and guest execution context.
 * NOTE(review): presumably bitmasks of event counter state switched at
 * world switch — confirm against kvm_vcpu_pmu_restore_{guest,host}.
 */
struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};
     27
/* Per-vcpu virtual PMU state */
struct kvm_pmu {
	struct irq_work overflow_work;	/* deferred work for counter overflow handling */
	struct kvm_pmu_events events;	/* host/guest event view for this vcpu */
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];	/* one entry per architectural counter */
	DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS);	/* which counter pairs are chained */
	int irq_num;	/* overflow interrupt number; valid once >= VGIC_NR_SGIS (see kvm_arm_pmu_irq_initialized) */
	bool created;	/* NOTE(review): presumably set once userspace finalizes the PMU device — confirm */
	bool irq_level;	/* current level of the overflow interrupt line */
};
     37
/* List node associating a host arm_pmu with a list of supported PMUs */
struct arm_pmu_entry {
	struct list_head entry;		/* linkage into the containing list */
	struct arm_pmu *arm_pmu;	/* the host PMU this entry describes */
};
     42
/* Static key flipped on when a host PMU usable for PMUv3 emulation exists */
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

/*
 * Hot-path check for host PMUv3 support. Compiles down to a patched
 * branch via the static key, so it costs nothing once patched.
 */
static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}
     49
/*
 * True once userspace has configured the PMU overflow interrupt:
 * valid interrupt IDs lie at or above the end of the SGI range.
 */
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
/* Guest counter access and feature identification */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
/* vcpu lifecycle hooks */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
/* Counter enable/disable and entry/exit synchronization */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
/* Userspace notification for overflow interrupt level changes */
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
/* Guest register write handlers */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
/* KVM_SET/GET/HAS_DEVICE_ATTR plumbing for the vPMU device */
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

/* Host/guest event-state switching helpers */
struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);

/* Has userspace requested the PMUv3 feature for this vcpu? */
#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
     82
     83/*
     84 * Updates the vcpu's view of the pmu events for this cpu.
     85 * Must be called before every vcpu run after disabling interrupts, to ensure
     86 * that an interrupt cannot fire and update the structure.
     87 */
     88#define kvm_pmu_update_vcpu_events(vcpu)				\
     89	do {								\
     90		if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))		\
     91			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
     92	} while (0)
     93
     94#else
/*
 * CONFIG_HW_PERF_EVENTS is not set: provide no-op stubs with identical
 * interfaces so callers need no #ifdef guards of their own.
 */

/* Empty placeholder so vcpu state can still embed a (zero-size) pmu member */
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

/* Without a PMU, no overflow interrupt can ever have been configured */
#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
/* Device-attribute accessors report "no such attribute" without a PMU */
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

/* Statement expression keeps usage forms identical to the real definition */
#define kvm_vcpu_has_pmu(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
    159
    160#endif
    161
    162#endif