cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

perf_event.h (16838B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

#include <linux/static_call.h>

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED				       16
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
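
/*
 * A minimal usage sketch of how these fields compose into a raw event
 * selector value. The event/umask pair below is illustrative: 0xc0/0x00
 * is the architectural "instructions retired" encoding on Intel, not a
 * value defined in this header.
 *
 *	u64 evtsel = (0xc0 & ARCH_PERFMON_EVENTSEL_EVENT) |
 *		     ((0x00ULL << 8) & ARCH_PERFMON_EVENTSEL_UMASK) |
 *		     ARCH_PERFMON_EVENTSEL_USR |
 *		     ARCH_PERFMON_EVENTSEL_OS |
 *		     ARCH_PERFMON_EVENTSEL_ENABLE;
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, evtsel);
 */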

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT				48
#define AMD64_L3_SLICE_MASK				\
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK				\
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT				56
#define AMD64_L3_THREAD_MASK				\
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK			\
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES				BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES				BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT				42
#define AMD64_L3_COREID_MASK				\
	(0x7ULL << AMD64_L3_COREID_SHIFT)
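
/*
 * A minimal sketch of how the L3 PMC fields above can combine on a
 * Family 19h part to count for all cores, all slices and both SMT
 * threads. The event code 0x01 is a placeholder for illustration only,
 * not a value defined in this header.
 *
 *	u64 val = 0x01;				// placeholder event code
 *	val |= AMD64_L3_EN_ALL_CORES | AMD64_L3_EN_ALL_SLICES;
 *	val |= AMD64_L3_F19H_THREAD_MASK;	// count on both SMT threads
 */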

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |  \
	 ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP	BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT	24

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};
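
/*
 * A minimal usage sketch: these unions overlay the raw CPUID.0xA register
 * values, so enumeration is one cpuid() call (from <asm/processor.h>)
 * plus bitfield accesses. The variables below are illustrative.
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(0xa, &eax.full, &ebx.full, &unused, &edx.full);
 *	if (eax.split.version_id)
 *		pr_info("%d GP counters, %d fixed\n",
 *			eax.split.num_counters,
 *			edx.split.num_counters_fixed);
 */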

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int	lbr_depth_mask:8;
		unsigned int	reserved:22;
		/* Deep C-state Reset */
		unsigned int	lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int	lbr_lip:1;
	} split;
	unsigned int		full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int    lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int    lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int    lbr_call_stack:1;
	} split;
	unsigned int            full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int    lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int    lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int    lbr_br_type:1;
	} split;
	unsigned int            full;
};

/*
 * AMD "Extended Performance Monitoring and Debug" CPUID
 * detection/enumeration details:
 */
union cpuid_0x80000022_ebx {
	struct {
		/* Number of Core Performance Counters */
		unsigned int	num_core_pmc:4;
	} split;
	unsigned int		full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE		(1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS		(1 << 29)

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC, which has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode PMC,
 * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC, which doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX. The X equals the index of the fixed
 * counter + 1, e.g., the fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3	0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS	(INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS	(1ULL << INTEL_PMC_IDX_FIXED_SLOTS)

static inline bool use_fixed_pseudo_encoding(u64 code)
{
	return !(code & 0xff);
}
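
/*
 * A short worked example of the pseudo-encoding rule above: fixed counter
 * 2 (CPU_CLK_Unhalted.Ref) uses event=0x00, umask=0x03 (index 2 + 1),
 * i.e. config 0x0300, which this helper classifies as pseudo-encoded:
 *
 *	use_fixed_pseudo_encoding(0x0300);	// true: event byte is 0x00
 *	use_fixed_pseudo_encoding(0x003c);	// false: real event code 0x3c
 */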

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS			(INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE		(INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING		(INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC		(INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND		(INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND		(INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_TD_HEAVY_OPS		(INTEL_PMC_IDX_METRIC_BASE + 4)
#define INTEL_PMC_IDX_TD_BR_MISPREDICT		(INTEL_PMC_IDX_METRIC_BASE + 5)
#define INTEL_PMC_IDX_TD_FETCH_LAT		(INTEL_PMC_IDX_METRIC_BASE + 6)
#define INTEL_PMC_IDX_TD_MEM_BOUND		(INTEL_PMC_IDX_METRIC_BASE + 7)
#define INTEL_PMC_IDX_METRIC_END		INTEL_PMC_IDX_TD_MEM_BOUND
#define INTEL_PMC_MSK_TOPDOWN			((0xffull << INTEL_PMC_IDX_METRIC_BASE) | \
						INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS				0x0400	/* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING		0x8000	/* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC		0x8100	/* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND		0x8200	/* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND		0x8300	/* BE bound metric */
/* Level 2 metrics */
#define INTEL_TD_METRIC_HEAVY_OPS		0x8400  /* Heavy Operations metric */
#define INTEL_TD_METRIC_BR_MISPREDICT		0x8500  /* Branch Mispredict metric */
#define INTEL_TD_METRIC_FETCH_LAT		0x8600  /* Fetch Latency metric */
#define INTEL_TD_METRIC_MEM_BOUND		0x8700  /* Memory bound metric */

#define INTEL_TD_METRIC_MAX			INTEL_TD_METRIC_MEM_BOUND
#define INTEL_TD_METRIC_NUM			8

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}
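
/*
 * A short worked example, following directly from the definitions above
 * (INTEL_PMC_IDX_METRIC_BASE is 48, INTEL_PMC_IDX_FIXED_SLOTS is 35):
 *
 *	is_metric_idx(INTEL_PMC_IDX_TD_RETIRING);	// true: idx 48
 *	is_metric_idx(INTEL_PMC_IDX_FIXED_SLOTS);	// false: idx 35
 *	is_topdown_idx(INTEL_PMC_IDX_FIXED_SLOTS);	// true: SLOTS backs the metrics
 */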

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)	\
			(~(0x1ull << bit) & INTEL_PMC_MSK_TOPDOWN)

#define GLOBAL_STATUS_COND_CHG			BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT		62
#define GLOBAL_STATUS_BUFFER_OVF		BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
#define GLOBAL_STATUS_ASIF			BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT		58
#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT		55
#define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT	48

#define GLOBAL_CTRL_EN_PERF_METRICS		48
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate LBR stack frozen state
 * for architectural perfmon v4, and we unconditionally mask that bit in
 * handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do anything
 * with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT	0x1b00

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_size;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	u64 latency;
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

/*
 * AMD Extended Performance Monitoring and Debug cpuid feature detection
 */
#define EXT_PERFMON_DEBUG_FEATURES		0x80000022

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)
#define IBS_CAPS_ZEN4			(1U<<11)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_L3MISSONLY	(1ULL<<59)
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_L3MISSONLY	(1ULL<<16)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK	(0x7FULL<<20)	/* separate upper 7 bits */
#define IBS_RIP_INVALID		(1ULL<<38)
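
/*
 * A minimal sketch of how a 23-bit max-count value splits across the op
 * control register when IBS_CAPS_OPCNTEXT is supported: the low 16 bits
 * land in IBS_OP_MAX_CNT and the upper 7 bits in bits 26:20. `want` is a
 * hypothetical variable holding the desired count:
 *
 *	u64 cnt = want & IBS_OP_MAX_CNT_EXT;	// clamp to 23 usable bits
 *	u64 config = (cnt & IBS_OP_MAX_CNT) |
 *		     (((cnt >> 16) << 20) & IBS_OP_MAX_CNT_EXT_MASK);
 */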

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
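
/*
 * A minimal usage sketch: get_ibs_caps() returns 0 when IBS is not
 * available, otherwise a mask to be tested against the IBS_CAPS_* bits
 * above:
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if ((caps & IBS_CAPS_AVAIL) && (caps & IBS_CAPS_OPCNT))
 *		;	// IbsOpCntCtl supported: IBS_OP_CNT_CTL may be set
 */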

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;				\
	regs->flags = 0;					\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int	nr;
	unsigned int	from;
	unsigned int	to;
	unsigned int	info;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	return -1;
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
 extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{

}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
 extern void amd_pmu_enable_virt(void);
 extern void amd_pmu_disable_virt(void);

#if defined(CONFIG_PERF_EVENTS_AMD_BRS)

#define PERF_NEEDS_LOPWR_CB 1

/*
 * architectural low power callback impacts
 * drivers/acpi/processor_idle.c
 * drivers/acpi/acpi_pad.c
 */
extern void perf_amd_brs_lopwr_cb(bool lopwr_in);

DECLARE_STATIC_CALL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);

static inline void perf_lopwr_cb(bool lopwr_in)
{
	static_call_mod(perf_lopwr_cb)(lopwr_in);
}

#endif /* PERF_NEEDS_LOPWR_CB */

#else
 static inline void amd_pmu_enable_virt(void) { }
 static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */