cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

riscv_pmu.h (2138B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 SiFive
 * Copyright (C) 2018 Andes Technology Corporation
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 *
 */

#ifndef _ASM_RISCV_PERF_EVENT_H
#define _ASM_RISCV_PERF_EVENT_H

#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>

#ifdef CONFIG_RISCV_PMU

/*
 * RISCV_MAX_COUNTERS bounds the number of hardware and firmware counters
 * tracked per CPU; backends must stay within this limit.
 */

#define RISCV_MAX_COUNTERS	64
#define RISCV_OP_UNSUPP		(-EOPNOTSUPP)
#define RISCV_PMU_PDEV_NAME	"riscv-pmu"
#define RISCV_PMU_LEGACY_PDEV_NAME	"riscv-pmu-legacy"

#define RISCV_PMU_STOP_FLAG_RESET 1

struct cpu_hw_events {
	/* number of currently enabled events */
	int			n_events;
	/* counter overflow interrupt */
	int		irq;
	/* currently enabled events, indexed by counter */
	struct perf_event	*events[RISCV_MAX_COUNTERS];
	/* currently enabled hardware counters */
	DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS);
	/* currently enabled firmware counters */
	DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS);
};
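
/*
 * Illustrative sketch, not part of this header: one plausible way a
 * backend's ctr_get_idx() callback could claim a free slot in the
 * used_hw_ctrs bitmap above. The helper name is hypothetical.
 */
static inline int riscv_pmu_sketch_claim_hw_ctr(struct cpu_hw_events *cpuc,
						int num_counters)
{
	int idx = find_first_zero_bit(cpuc->used_hw_ctrs, num_counters);

	if (idx >= num_counters)
		return -ENOSPC;
	set_bit(idx, cpuc->used_hw_ctrs);
	return idx;
}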

struct riscv_pmu {
	struct pmu	pmu;
	char		*name;

	/* counter overflow interrupt handler supplied by the backend */
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);

	int		num_counters;
	/* counter access and event mapping callbacks supplied by the backend */
	u64		(*ctr_read)(struct perf_event *event);
	int		(*ctr_get_idx)(struct perf_event *event);
	int		(*ctr_get_width)(int idx);
	void		(*ctr_clear_idx)(struct perf_event *event);
	void		(*ctr_start)(struct perf_event *event, u64 init_val);
	void		(*ctr_stop)(struct perf_event *event, unsigned long flag);
	int		(*event_map)(struct perf_event *event, u64 *config);

	/* per-CPU counter and event bookkeeping */
	struct cpu_hw_events	__percpu *hw_events;
	struct hlist_node	node;
};
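
/*
 * Illustrative sketch, not part of this header: the rough shape of a
 * backend that fills in the riscv_pmu callbacks and registers the
 * embedded struct pmu with the perf core. The sketch_* names are
 * hypothetical stand-ins for a real backend's implementations.
 */
static u64 sketch_ctr_read(struct perf_event *event)
{
	/* a real backend would read the counter selected by event->hw.idx */
	return 0;
}

static int sketch_register_backend(struct riscv_pmu *rvpmu)
{
	rvpmu->num_counters = 2;
	rvpmu->ctr_read = sketch_ctr_read;
	/* ...remaining ctr_*, event_map and handle_irq set up the same way... */

	return perf_pmu_register(&rvpmu->pmu, "cpu", PERF_TYPE_RAW);
}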

#define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu))
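
/*
 * Illustrative sketch, not part of this header: the perf core hands pmu
 * callbacks a plain struct pmu pointer; to_riscv_pmu() recovers the
 * enclosing riscv_pmu so a backend can reach its own state. The helper
 * name is hypothetical.
 */
static inline int riscv_pmu_sketch_num_counters(struct pmu *pmu)
{
	struct riscv_pmu *rvpmu = to_riscv_pmu(pmu);

	return rvpmu->num_counters;
}
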
unsigned long riscv_pmu_ctr_read_csr(unsigned long csr);
int riscv_pmu_event_set_period(struct perf_event *event);
uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event);
u64 riscv_pmu_event_update(struct perf_event *event);
#ifdef CONFIG_RISCV_PMU_LEGACY
void riscv_pmu_legacy_skip_init(void);
#else
static inline void riscv_pmu_legacy_skip_init(void) {}
#endif
struct riscv_pmu *riscv_pmu_alloc(void);

#endif /* CONFIG_RISCV_PMU */

#endif /* _ASM_RISCV_PERF_EVENT_H */