cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

generic-compat-pmu.c (9205B)


      1// SPDX-License-Identifier: GPL-2.0+
      2//
      3// Copyright 2019 Madhavan Srinivasan, IBM Corporation.
      4
      5#define pr_fmt(fmt)	"generic-compat-pmu: " fmt
      6
      7#include "isa207-common.h"
      8
      9/*
     10 * Raw event encoding:
     11 *
     12 *        60        56        52        48        44        40        36        32
     13 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
     14 *
     15 *        28        24        20        16        12         8         4         0
     16 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
     17 *                                 [ pmc ]                       [    pmcxsel    ]
     18 */
     19
     20/*
     21 * Event codes defined in ISA v3.0B
     22 */
     23#define EVENT(_name, _code)	_name = _code,
     24
     25enum {
     26	/* Cycles, alternate code */
     27	EVENT(PM_CYC_ALT,			0x100f0)
     28	/* One or more instructions completed in a cycle */
     29	EVENT(PM_CYC_INST_CMPL,			0x100f2)
     30	/* Floating-point instruction completed */
     31	EVENT(PM_FLOP_CMPL,			0x100f4)
     32	/* Instruction ERAT/L1-TLB miss */
     33	EVENT(PM_L1_ITLB_MISS,			0x100f6)
     34	/* All instructions completed and none available */
     35	EVENT(PM_NO_INST_AVAIL,			0x100f8)
     36	/* A load-type instruction completed (ISA v3.0+) */
     37	EVENT(PM_LD_CMPL,			0x100fc)
     38	/* Instruction completed, alternate code (ISA v3.0+) */
     39	EVENT(PM_INST_CMPL_ALT,			0x100fe)
     40	/* A store-type instruction completed */
     41	EVENT(PM_ST_CMPL,			0x200f0)
     42	/* Instruction Dispatched */
     43	EVENT(PM_INST_DISP,			0x200f2)
     44	/* Run_cycles */
     45	EVENT(PM_RUN_CYC,			0x200f4)
     46	/* Data ERAT/L1-TLB miss/reload */
     47	EVENT(PM_L1_DTLB_RELOAD,		0x200f6)
     48	/* Taken branch completed */
     49	EVENT(PM_BR_TAKEN_CMPL,			0x200fa)
     50	/* Demand iCache Miss */
     51	EVENT(PM_L1_ICACHE_MISS,		0x200fc)
     52	/* L1 Dcache reload from memory */
     53	EVENT(PM_L1_RELOAD_FROM_MEM,		0x200fe)
     54	/* L1 Dcache store miss */
     55	EVENT(PM_ST_MISS_L1,			0x300f0)
     56	/* Alternate code for PM_INST_DISP */
     57	EVENT(PM_INST_DISP_ALT,			0x300f2)
     58	/* Branch direction or target mispredicted */
     59	EVENT(PM_BR_MISPREDICT,			0x300f6)
     60	/* Data TLB miss/reload */
     61	EVENT(PM_DTLB_MISS,			0x300fc)
     62	/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
     63	EVENT(PM_DATA_FROM_L3MISS,		0x300fe)
     64	/* L1 Dcache load miss */
     65	EVENT(PM_LD_MISS_L1,			0x400f0)
     66	/* Cycle when instruction(s) dispatched */
     67	EVENT(PM_CYC_INST_DISP,			0x400f2)
     68	/* Branch or branch target mispredicted */
     69	EVENT(PM_BR_MPRED_CMPL,			0x400f6)
     70	/* Instructions completed with run latch set */
     71	EVENT(PM_RUN_INST_CMPL,			0x400fa)
     72	/* Instruction TLB miss/reload */
     73	EVENT(PM_ITLB_MISS,			0x400fc)
     74	/* Load data not cached */
     75	EVENT(PM_LD_NOT_CACHED,			0x400fe)
     76	/* Instructions */
     77	EVENT(PM_INST_CMPL,			0x500fa)
     78	/* Cycles */
     79	EVENT(PM_CYC,				0x600f4)
     80};
     81
     82#undef EVENT
     83
/*
 * Table of alternatives, sorted in increasing order of column 0.
 * Each row lists event codes that count the same thing on different
 * PMCs, giving the scheduler more freedom when placing events.
 */
/* Note that in each row, column 0 must be the smallest */
static const unsigned int generic_event_alternatives[][MAX_ALT] = {
	{ PM_CYC_ALT,			PM_CYC },
	{ PM_INST_CMPL_ALT,		PM_INST_CMPL },
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
};
     91
     92static int generic_get_alternatives(u64 event, unsigned int flags, u64 alt[])
     93{
     94	int num_alt = 0;
     95
     96	num_alt = isa207_get_alternatives(event, alt,
     97					  ARRAY_SIZE(generic_event_alternatives), flags,
     98					  generic_event_alternatives);
     99
    100	return num_alt;
    101}
    102
/*
 * Map the generic perf hardware event names to raw PMU event codes.
 * These macros generate sysfs event attributes (defined in shared
 * powerpc perf headers — see GENERIC_EVENT_ATTR/CACHE_EVENT_ATTR).
 */
GENERIC_EVENT_ATTR(cpu-cycles,			PM_CYC);
GENERIC_EVENT_ATTR(instructions,		PM_INST_CMPL);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,	PM_NO_INST_AVAIL);
GENERIC_EVENT_ATTR(branch-misses,		PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-misses,		PM_LD_MISS_L1);

/* Likewise for the generalized cache event names. */
CACHE_EVENT_ATTR(L1-dcache-load-misses,		PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-store-misses,	PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,		PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(LLC-load-misses,		PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(branch-load-misses,		PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses,		PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,		PM_ITLB_MISS);
    116
/* NULL-terminated list of the event attributes declared above. */
static struct attribute *generic_compat_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_NO_INST_AVAIL),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};
    132
/* Exposed to userspace as the "events" sysfs directory of the PMU. */
static const struct attribute_group generic_compat_pmu_events_group = {
	.name = "events",
	.attrs = generic_compat_events_attr,
};
    137
/*
 * Raw event format, matching the encoding diagram at the top of the
 * file: "event" is the full 20-bit code, decomposed into pmcxsel
 * (config bits 0-7) and pmc (config bits 16-19).
 */
PMU_FORMAT_ATTR(event,		"config:0-19");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(pmc,		"config:16-19");

/* NULL-terminated list of the format attributes above. */
static struct attribute *generic_compat_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_pmc.attr,
	NULL,
};
    148
/* Exposed to userspace as the "format" sysfs directory of the PMU. */
static const struct attribute_group generic_compat_pmu_format_group = {
	.name = "format",
	.attrs = generic_compat_pmu_format_attr,
};

/* All sysfs attribute groups for this PMU (NULL-terminated). */
static const struct attribute_group *generic_compat_pmu_attr_groups[] = {
	&generic_compat_pmu_format_group,
	&generic_compat_pmu_events_group,
	NULL,
};
    159
/*
 * Map PERF_COUNT_HW_* generic events to raw event codes. Entries not
 * listed here are zero-initialized, i.e. unsupported.
 */
static int compat_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_NO_INST_AVAIL,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};
    167
/* Shorthand for the PERF_COUNT_HW_CACHE_* index constants below. */
#define C(x)	PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events, indexed by
 * [cache][operation][result].
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
};

#undef C
    277
    278/*
    279 * We set MMCR0[CC5-6RUN] so we can use counters 5 and 6 for
    280 * PM_INST_CMPL and PM_CYC.
    281 */
    282static int generic_compute_mmcr(u64 event[], int n_ev,
    283				unsigned int hwc[], struct mmcr_regs *mmcr,
    284				struct perf_event *pevents[], u32 flags)
    285{
    286	int ret;
    287
    288	ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
    289	if (!ret)
    290		mmcr->mmcr0 |= MMCR0_C56RUN;
    291	return ret;
    292}
    293
/*
 * PMU description registered with the powerpc perf core. Most
 * callbacks are the shared ISA v2.07 (isa207) implementations;
 * compute_mmcr and get_alternatives use the local wrappers above.
 */
static struct power_pmu generic_compat_pmu = {
	.name			= "GENERIC_COMPAT",
	.n_counter		= MAX_PMU_COUNTERS,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.compute_mmcr		= generic_compute_mmcr,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= generic_get_alternatives,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(compat_generic_events),
	.generic_events		= compat_generic_events,
	.cache_events		= &generic_compat_cache_events,
	.attr_groups		= generic_compat_pmu_attr_groups,
};
    309
    310int __init init_generic_compat_pmu(void)
    311{
    312	int rc = 0;
    313
    314	/*
    315	 * From ISA v2.07 on, PMU features are architected;
    316	 * we require >= v3.0 because (a) that has PM_LD_CMPL and
    317	 * PM_INST_CMPL_ALT, which v2.07 doesn't have, and
    318	 * (b) we don't expect any non-IBM Power ISA
    319	 * implementations that conform to v2.07 but not v3.0.
    320	 */
    321	if (!cpu_has_feature(CPU_FTR_ARCH_300))
    322		return -ENODEV;
    323
    324	rc = register_power_pmu(&generic_compat_pmu);
    325	if (rc)
    326		return rc;
    327
    328	/* Tell userspace that EBB is supported */
    329	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
    330
    331	return 0;
    332}