cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pmu.h (5115B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
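
/*
 * Editorial note, not part of the original pmu.h: each fixed counter owns a
 * 4-bit field in IA32_FIXED_CTR_CTRL (bit 0 enables ring-0 counting, bit 1
 * ring-3, bit 3 requests a PMI on overflow), so fixed_ctrl_field(0xb0, 1)
 * yields 0xb for fixed counter 1. A minimal sketch of how such a field might
 * be tested; fixed_ctr_is_enabled() is a hypothetical helper, not a KVM API.
 */
#if 0	/* illustrative only */
static inline bool fixed_ctr_is_enabled(u64 ctrl_reg, int idx)
{
	/* the low two bits of the field are the OS/USR enable bits */
	return fixed_ctrl_field(ctrl_reg, idx) & 0x3;
}
#endif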

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};

struct kvm_pmu_ops {
	unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);
};
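
/*
 * Editorial sketch, not part of the original pmu.h: kvm_pmu_ops is the
 * per-vendor dispatch table behind the intel_pmu_ops and amd_pmu_ops
 * declarations at the bottom of this header. A vendor module would fill it
 * roughly as below; the amd_* function names are placeholders here, the
 * real definitions live in the vendor PMU code (e.g. svm/pmu.c).
 */
#if 0	/* illustrative only */
struct kvm_pmu_ops amd_pmu_ops __read_mostly = {
	.pmc_perf_hw_id = amd_pmc_perf_hw_id,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};
#endif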

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}
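
/*
 * Editorial note, not part of the original pmu.h: pmc_read_counter() folds
 * the live perf_event delta into the value KVM last cached in pmc->counter
 * and truncates the sum to the guest-visible width, so the guest sees the
 * counter wrap at its architectural size. A worked example for an assumed
 * 48-bit counter:
 */
#if 0	/* illustrative only */
	u64 mask  = 0xffffffffffffULL;		/* pmc_bitmask() for 48 bits */
	u64 saved = mask - 5;			/* cached pmc->counter */
	u64 delta = 100;			/* live perf_event increment */
	u64 guest = (saved + delta) & mask;	/* == 94, wrapped past 2^48 */
#endif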

static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
						 u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
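
/*
 * Editorial sketch, not part of the original pmu.h: because get_gp_pmc()
 * takes the MSR base as a parameter, one helper serves both the counter and
 * the event-select MSR ranges. The lookup below is illustrative only;
 * lookup_gp() is hypothetical, and MSR_IA32_PERFCTR0 / MSR_P6_EVNTSEL0 are
 * the Intel architectural bases used here purely as an example.
 */
#if 0	/* illustrative only */
static struct kvm_pmc *lookup_gp(struct kvm_pmu *pmu, u32 msr)
{
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);		/* PERFCTRn range */
	if (!pmc)
		pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);	/* EVNTSELn range */
	return pmc;
}
#endif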

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}
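
/*
 * Editorial note, not part of the original pmu.h: the sample period is the
 * number of events left until the guest counter overflows, i.e. the two's
 * complement of the current value within the counter width. Worked example
 * for an assumed 48-bit counter:
 */
#if 0	/* illustrative only */
	u64 mask   = 0xffffffffffffULL;		/* pmc_bitmask() */
	u64 val    = 0xfffffffffff0ULL;		/* current counter value */
	u64 period = (-val) & mask;		/* == 0x10, 16 events to overflow */
	/* a counter already at 0 gets the full range: mask + 1 == 2^48 */
#endif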

static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event || pmc->is_paused)
		return;

	perf_event_period(pmc->perf_event,
			  get_sample_period(pmc, pmc->counter));
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */