cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uncore_snb.c (46610B)


// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
#include "uncore_discovery.h"

/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC		0x0100
#define PCI_DEVICE_ID_INTEL_IVB_IMC		0x0154
#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC		0x0150
#define PCI_DEVICE_ID_INTEL_HSW_IMC		0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC		0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC		0x1604
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC		0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC		0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC		0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC		0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC		0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC		0x191f
#define PCI_DEVICE_ID_INTEL_SKL_E3_IMC		0x1918
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC		0x5904
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC		0x5914
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC		0x590f
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC		0x591f
#define PCI_DEVICE_ID_INTEL_KBL_HQ_IMC		0x5910
#define PCI_DEVICE_ID_INTEL_KBL_WQ_IMC		0x5918
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC		0x3ecc
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC		0x3e10
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC		0x3ec4
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC	0x3e0f
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC	0x3e1f
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC	0x3ec2
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC	0x3e30
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC	0x3e18
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC	0x3ec6
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC	0x3e31
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC	0x3e33
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC	0x3eca
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC	0x3e32
#define PCI_DEVICE_ID_INTEL_AML_YD_IMC		0x590c
#define PCI_DEVICE_ID_INTEL_AML_YQ_IMC		0x590d
#define PCI_DEVICE_ID_INTEL_WHL_UQ_IMC		0x3ed0
#define PCI_DEVICE_ID_INTEL_WHL_4_UQ_IMC	0x3e34
#define PCI_DEVICE_ID_INTEL_WHL_UD_IMC		0x3e35
#define PCI_DEVICE_ID_INTEL_CML_H1_IMC		0x9b44
#define PCI_DEVICE_ID_INTEL_CML_H2_IMC		0x9b54
#define PCI_DEVICE_ID_INTEL_CML_H3_IMC		0x9b64
#define PCI_DEVICE_ID_INTEL_CML_U1_IMC		0x9b51
#define PCI_DEVICE_ID_INTEL_CML_U2_IMC		0x9b61
#define PCI_DEVICE_ID_INTEL_CML_U3_IMC		0x9b71
#define PCI_DEVICE_ID_INTEL_CML_S1_IMC		0x9b33
#define PCI_DEVICE_ID_INTEL_CML_S2_IMC		0x9b43
#define PCI_DEVICE_ID_INTEL_CML_S3_IMC		0x9b53
#define PCI_DEVICE_ID_INTEL_CML_S4_IMC		0x9b63
#define PCI_DEVICE_ID_INTEL_CML_S5_IMC		0x9b73
#define PCI_DEVICE_ID_INTEL_ICL_U_IMC		0x8a02
#define PCI_DEVICE_ID_INTEL_ICL_U2_IMC		0x8a12
#define PCI_DEVICE_ID_INTEL_TGL_U1_IMC		0x9a02
#define PCI_DEVICE_ID_INTEL_TGL_U2_IMC		0x9a04
#define PCI_DEVICE_ID_INTEL_TGL_U3_IMC		0x9a12
#define PCI_DEVICE_ID_INTEL_TGL_U4_IMC		0x9a14
#define PCI_DEVICE_ID_INTEL_TGL_H_IMC		0x9a36
#define PCI_DEVICE_ID_INTEL_RKL_1_IMC		0x4c43
#define PCI_DEVICE_ID_INTEL_RKL_2_IMC		0x4c53
#define PCI_DEVICE_ID_INTEL_ADL_1_IMC		0x4660
#define PCI_DEVICE_ID_INTEL_ADL_2_IMC		0x4641
#define PCI_DEVICE_ID_INTEL_ADL_3_IMC		0x4601
#define PCI_DEVICE_ID_INTEL_ADL_4_IMC		0x4602
#define PCI_DEVICE_ID_INTEL_ADL_5_IMC		0x4609
#define PCI_DEVICE_ID_INTEL_ADL_6_IMC		0x460a
#define PCI_DEVICE_ID_INTEL_ADL_7_IMC		0x4621
#define PCI_DEVICE_ID_INTEL_ADL_8_IMC		0x4623
#define PCI_DEVICE_ID_INTEL_ADL_9_IMC		0x4629
#define PCI_DEVICE_ID_INTEL_ADL_10_IMC		0x4637
#define PCI_DEVICE_ID_INTEL_ADL_11_IMC		0x463b
#define PCI_DEVICE_ID_INTEL_ADL_12_IMC		0x4648
#define PCI_DEVICE_ID_INTEL_ADL_13_IMC		0x4649
#define PCI_DEVICE_ID_INTEL_ADL_14_IMC		0x4650
#define PCI_DEVICE_ID_INTEL_ADL_15_IMC		0x4668
#define PCI_DEVICE_ID_INTEL_ADL_16_IMC		0x4670
#define PCI_DEVICE_ID_INTEL_ADL_17_IMC		0x4614
#define PCI_DEVICE_ID_INTEL_ADL_18_IMC		0x4617
#define PCI_DEVICE_ID_INTEL_ADL_19_IMC		0x4618
#define PCI_DEVICE_ID_INTEL_ADL_20_IMC		0x461B
#define PCI_DEVICE_ID_INTEL_ADL_21_IMC		0x461C
#define PCI_DEVICE_ID_INTEL_RPL_1_IMC		0xA700
#define PCI_DEVICE_ID_INTEL_RPL_2_IMC		0xA702
#define PCI_DEVICE_ID_INTEL_RPL_3_IMC		0xA706
#define PCI_DEVICE_ID_INTEL_RPL_4_IMC		0xA709
#define PCI_DEVICE_ID_INTEL_RPL_5_IMC		0xA701
#define PCI_DEVICE_ID_INTEL_RPL_6_IMC		0xA703
#define PCI_DEVICE_ID_INTEL_RPL_7_IMC		0xA704
#define PCI_DEVICE_ID_INTEL_RPL_8_IMC		0xA705
#define PCI_DEVICE_ID_INTEL_RPL_9_IMC		0xA706
#define PCI_DEVICE_ID_INTEL_RPL_10_IMC		0xA707
#define PCI_DEVICE_ID_INTEL_RPL_11_IMC		0xA708
#define PCI_DEVICE_ID_INTEL_RPL_12_IMC		0xA709
#define PCI_DEVICE_ID_INTEL_RPL_13_IMC		0xA70a
#define PCI_DEVICE_ID_INTEL_RPL_14_IMC		0xA70b
#define PCI_DEVICE_ID_INTEL_RPL_15_IMC		0xA715
#define PCI_DEVICE_ID_INTEL_RPL_16_IMC		0xA716
#define PCI_DEVICE_ID_INTEL_RPL_17_IMC		0xA717
#define PCI_DEVICE_ID_INTEL_RPL_18_IMC		0xA718
#define PCI_DEVICE_ID_INTEL_RPL_19_IMC		0xA719
#define PCI_DEVICE_ID_INTEL_RPL_20_IMC		0xA71A
#define PCI_DEVICE_ID_INTEL_RPL_21_IMC		0xA71B
#define PCI_DEVICE_ID_INTEL_RPL_22_IMC		0xA71C
#define PCI_DEVICE_ID_INTEL_RPL_23_IMC		0xA728
#define PCI_DEVICE_ID_INTEL_RPL_24_IMC		0xA729
#define PCI_DEVICE_ID_INTEL_RPL_25_IMC		0xA72A


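/*
 * Editorial note, not in the upstream source: IMC_UNCORE_DEV() below builds
 * a pci_device_id entry whose driver_data binds the device to the
 * SNB_PCI_UNCORE_IMC type at box index 0, so all of the client IMC IDs above
 * share a single "imc" uncore PMU definition.
 */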
#define IMC_UNCORE_DEV(a)						\
{									\
	PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_##a##_IMC),	\
	.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),	\
}

/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK			0x000000ff
#define SNB_UNC_CTL_UMASK_MASK			0x0000ff00
#define SNB_UNC_CTL_EDGE_DET			(1 << 18)
#define SNB_UNC_CTL_EN				(1 << 22)
#define SNB_UNC_CTL_INVERT			(1 << 23)
#define SNB_UNC_CTL_CMASK_MASK			0x1f000000
#define NHM_UNC_CTL_CMASK_MASK			0xff000000
#define NHM_UNC_FIXED_CTR_CTL_EN		(1 << 0)

#define SNB_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 SNB_UNC_CTL_CMASK_MASK)

#define NHM_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 NHM_UNC_CTL_CMASK_MASK)

/* SNB global control register */
#define SNB_UNC_PERF_GLOBAL_CTL                 0x391
#define SNB_UNC_FIXED_CTR_CTRL                  0x394
#define SNB_UNC_FIXED_CTR                       0x395

/* SNB uncore global control */
#define SNB_UNC_GLOBAL_CTL_CORE_ALL             ((1 << 4) - 1)
#define SNB_UNC_GLOBAL_CTL_EN                   (1 << 29)

/* SNB Cbo register */
#define SNB_UNC_CBO_0_PERFEVTSEL0               0x700
#define SNB_UNC_CBO_0_PER_CTR0                  0x706
#define SNB_UNC_CBO_MSR_OFFSET                  0x10

/* SNB ARB register */
#define SNB_UNC_ARB_PER_CTR0			0x3b0
#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
#define SNB_UNC_ARB_MSR_OFFSET			0x10

/* NHM global control register */
#define NHM_UNC_PERF_GLOBAL_CTL                 0x391
#define NHM_UNC_FIXED_CTR                       0x394
#define NHM_UNC_FIXED_CTR_CTRL                  0x395

/* NHM uncore global control */
#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL            ((1ULL << 8) - 1)
#define NHM_UNC_GLOBAL_CTL_EN_FC                (1ULL << 32)

/* NHM uncore register */
#define NHM_UNC_PERFEVTSEL0                     0x3c0
#define NHM_UNC_UNCORE_PMC0                     0x3b0

/* SKL uncore global control */
#define SKL_UNC_PERF_GLOBAL_CTL			0xe01
#define SKL_UNC_GLOBAL_CTL_CORE_ALL		((1 << 5) - 1)

/* ICL Cbo register */
#define ICL_UNC_CBO_CONFIG			0x396
#define ICL_UNC_NUM_CBO_MASK			0xf
#define ICL_UNC_CBO_0_PER_CTR0			0x702
#define ICL_UNC_CBO_MSR_OFFSET			0x8

/* ICL ARB register */
#define ICL_UNC_ARB_PER_CTR			0x3b1
#define ICL_UNC_ARB_PERFEVTSEL			0x3b3

/* ADL uncore global control */
#define ADL_UNC_PERF_GLOBAL_CTL			0x2ff0
#define ADL_UNC_FIXED_CTR_CTRL                  0x2fde
#define ADL_UNC_FIXED_CTR                       0x2fdf

/* ADL Cbo register */
#define ADL_UNC_CBO_0_PER_CTR0			0x2002
#define ADL_UNC_CBO_0_PERFEVTSEL0		0x2000
#define ADL_UNC_CTL_THRESHOLD			0x3f000000
#define ADL_UNC_RAW_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 SNB_UNC_CTL_UMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET | \
						 SNB_UNC_CTL_INVERT | \
						 ADL_UNC_CTL_THRESHOLD)

/* ADL ARB register */
#define ADL_UNC_ARB_PER_CTR0			0x2FD2
#define ADL_UNC_ARB_PERFEVTSEL0			0x2FD0
#define ADL_UNC_ARB_MSR_OFFSET			0x8

DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29");
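/*
 * Editorial note, not in the upstream source: the format attributes above
 * are exported through sysfs (e.g.
 * /sys/bus/event_source/devices/uncore_cbox_0/format/) so that perf can
 * translate "event=0x..,umask=0x.." strings into the config bitfields
 * defined by the masks above.
 */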

/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
}

static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	wrmsrl(event->hw.config_base, 0);
}

static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
	}
}

static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
}

static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
}

static struct uncore_event_desc snb_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
	{ /* end: all zeroes */ },
};

static struct attribute *snb_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask5.attr,
	NULL,
};

static const struct attribute_group snb_uncore_format_group = {
	.name		= "format",
	.attrs		= snb_uncore_formats_attr,
};

static struct intel_uncore_ops snb_uncore_msr_ops = {
	.init_box	= snb_uncore_msr_init_box,
	.enable_box	= snb_uncore_msr_enable_box,
	.exit_box	= snb_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct event_constraint snb_uncore_arb_constraints[] = {
	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
	EVENT_CONSTRAINT_END
};

static struct intel_uncore_type snb_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.num_boxes	= 4,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type snb_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &snb_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *snb_msr_uncores[] = {
	&snb_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

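/*
 * Editorial note, not in the upstream source: each core owns one C-box, so
 * the init functions below cap num_boxes at the package's core count.
 */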
void snb_uncore_cpu_init(void)
{
	uncore_msr_uncores = snb_msr_uncores;
	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
}

static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0) {
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
	}

	/* The 8th CBOX has different MSR space */
	if (box->pmu->pmu_idx == 7)
		__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
}

static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
		SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
}

static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops skl_uncore_msr_ops = {
	.init_box	= skl_uncore_msr_init_box,
	.enable_box	= skl_uncore_msr_enable_box,
	.exit_box	= skl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type skl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 4,
	.num_boxes	= 8,
	.perf_ctr_bits	= 44,
	.fixed_ctr_bits	= 48,
	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
	.ops		= &skl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
	.event_descs	= snb_uncore_events,
};

static struct intel_uncore_type *skl_msr_uncores[] = {
	&skl_uncore_cbox,
	&snb_uncore_arb,
	NULL,
};

void skl_uncore_cpu_init(void)
{
	uncore_msr_uncores = skl_msr_uncores;
	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
}

static struct intel_uncore_ops icl_uncore_msr_ops = {
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type icl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct uncore_event_desc icl_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
	{ /* end: all zeroes */ },
};

static struct attribute *icl_uncore_clock_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group icl_uncore_clock_format_group = {
	.name = "format",
	.attrs = icl_uncore_clock_formats_attr,
};

static struct intel_uncore_type icl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= SNB_UNC_FIXED_CTR,
	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &icl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type icl_uncore_arb = {
	.name		= "arb",
	.num_counters   = 1,
	.num_boxes	= 1,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ICL_UNC_ARB_PER_CTR,
	.event_ctl	= ICL_UNC_ARB_PERFEVTSEL,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.ops		= &icl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type *icl_msr_uncores[] = {
	&icl_uncore_cbox,
	&icl_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};

static int icl_get_cbox_num(void)
{
	u64 num_boxes;

	rdmsrl(ICL_UNC_CBO_CONFIG, num_boxes);

	return num_boxes & ICL_UNC_NUM_CBO_MASK;
}

void icl_uncore_cpu_init(void)
{
	uncore_msr_uncores = icl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
}

static struct intel_uncore_type *tgl_msr_uncores[] = {
	&icl_uncore_cbox,
	&snb_uncore_arb,
	&icl_uncore_clockbox,
	NULL,
};

static void rkl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

void tgl_uncore_cpu_init(void)
{
	uncore_msr_uncores = tgl_msr_uncores;
	icl_uncore_cbox.num_boxes = icl_get_cbox_num();
	icl_uncore_cbox.ops = &skl_uncore_msr_ops;
	icl_uncore_clockbox.ops = &skl_uncore_msr_ops;
	snb_uncore_arb.ops = &skl_uncore_msr_ops;
	skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}

static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
}

static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
{
	if (box->pmu->pmu_idx == 0)
		wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
}

static struct intel_uncore_ops adl_uncore_msr_ops = {
	.init_box	= adl_uncore_msr_init_box,
	.enable_box	= adl_uncore_msr_enable_box,
	.disable_box	= adl_uncore_msr_disable_box,
	.exit_box	= adl_uncore_msr_exit_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= snb_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct attribute *adl_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_threshold.attr,
	NULL,
};

static const struct attribute_group adl_uncore_format_group = {
	.name		= "format",
	.attrs		= adl_uncore_formats_attr,
};

static struct intel_uncore_type adl_uncore_cbox = {
	.name		= "cbox",
	.num_counters   = 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_CBO_0_PER_CTR0,
	.event_ctl	= ADL_UNC_CBO_0_PERFEVTSEL0,
	.event_mask	= ADL_UNC_RAW_EVENT_MASK,
	.msr_offset	= ICL_UNC_CBO_MSR_OFFSET,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &adl_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_arb = {
	.name		= "arb",
	.num_counters   = 2,
	.num_boxes	= 2,
	.perf_ctr_bits	= 44,
	.perf_ctr	= ADL_UNC_ARB_PER_CTR0,
	.event_ctl	= ADL_UNC_ARB_PERFEVTSEL0,
	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
	.msr_offset	= ADL_UNC_ARB_MSR_OFFSET,
	.constraints	= snb_uncore_arb_constraints,
	.ops		= &adl_uncore_msr_ops,
	.format_group	= &snb_uncore_format_group,
};

static struct intel_uncore_type adl_uncore_clockbox = {
	.name		= "clock",
	.num_counters	= 1,
	.num_boxes	= 1,
	.fixed_ctr_bits	= 48,
	.fixed_ctr	= ADL_UNC_FIXED_CTR,
	.fixed_ctl	= ADL_UNC_FIXED_CTR_CTRL,
	.single_fixed	= 1,
	.event_mask	= SNB_UNC_CTL_EV_SEL_MASK,
	.format_group	= &icl_uncore_clock_format_group,
	.ops		= &adl_uncore_msr_ops,
	.event_descs	= icl_uncore_events,
};

static struct intel_uncore_type *adl_msr_uncores[] = {
	&adl_uncore_cbox,
	&adl_uncore_arb,
	&adl_uncore_clockbox,
	NULL,
};

void adl_uncore_cpu_init(void)
{
	adl_uncore_cbox.num_boxes = icl_get_cbox_num();
	uncore_msr_uncores = adl_msr_uncores;
}

enum {
	SNB_PCI_UNCORE_IMC,
};

static struct uncore_event_desc snb_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_reads,  "event=0x01"),
	INTEL_UNCORE_EVENT_DESC(data_reads.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_reads.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_writes, "event=0x02"),
	INTEL_UNCORE_EVENT_DESC(data_writes.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_writes.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(gt_requests, "event=0x03"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(gt_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(ia_requests, "event=0x04"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(ia_requests.unit, "MiB"),

	INTEL_UNCORE_EVENT_DESC(io_requests, "event=0x05"),
	INTEL_UNCORE_EVENT_DESC(io_requests.scale, "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(io_requests.unit, "MiB"),

	{ /* end: all zeroes */ },
};
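/*
 * Editorial note, not in the upstream source: the .scale above is
 * 2^-14 = 64/2^20. Each free-running counter counts 64-byte cachelines, so
 * scaling by 6.103515625e-5 reports MiB. The events appear under
 * /sys/bus/event_source/devices/uncore_imc/events/ and can be read
 * system-wide, e.g. "perf stat -a -e uncore_imc/data_reads/ -- sleep 1".
 */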

#define SNB_UNCORE_PCI_IMC_EVENT_MASK		0xff
#define SNB_UNCORE_PCI_IMC_BAR_OFFSET		0x48

/* page size multiple covering all config regs */
#define SNB_UNCORE_PCI_IMC_MAP_SIZE		0x6000

#define SNB_UNCORE_PCI_IMC_DATA_READS		0x1
#define SNB_UNCORE_PCI_IMC_DATA_READS_BASE	0x5050
#define SNB_UNCORE_PCI_IMC_DATA_WRITES		0x2
#define SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE	0x5054
#define SNB_UNCORE_PCI_IMC_CTR_BASE		SNB_UNCORE_PCI_IMC_DATA_READS_BASE

/* BW breakdown - legacy counters */
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS		0x3
#define SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE	0x5040
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS		0x4
#define SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE	0x5044
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS		0x5
#define SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE	0x5048

enum perf_snb_uncore_imc_freerunning_types {
	SNB_PCI_UNCORE_IMC_DATA_READS		= 0,
	SNB_PCI_UNCORE_IMC_DATA_WRITES,
	SNB_PCI_UNCORE_IMC_GT_REQUESTS,
	SNB_PCI_UNCORE_IMC_IA_REQUESTS,
	SNB_PCI_UNCORE_IMC_IO_REQUESTS,

	SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
};

static struct freerunning_counters snb_uncore_imc_freerunning[] = {
	[SNB_PCI_UNCORE_IMC_DATA_READS]		= { SNB_UNCORE_PCI_IMC_DATA_READS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_DATA_WRITES]	= { SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_GT_REQUESTS]	= { SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IA_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
	[SNB_PCI_UNCORE_IMC_IO_REQUESTS]	= { SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE,
							0x0, 0x0, 1, 32 },
};
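/*
 * Editorial note, not in the upstream source: each initializer above is
 * { counter_base, counter_offset, box_offset, num_counters, bits } per
 * struct freerunning_counters in uncore.h, i.e. a single 32-bit
 * free-running MMIO counter per bandwidth type.
 */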

static struct attribute *snb_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static const struct attribute_group snb_uncore_imc_format_group = {
	.name = "format",
	.attrs = snb_uncore_imc_formats_attr,
};

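/*
 * Editorial note, not in the upstream source: the IMC counters sit behind
 * MCHBAR (PCI config offset 0x48). The BAR's low bits are flag bits, so
 * init_box() below rounds the address down to a page boundary before
 * ioremap()ing the counter window.
 */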
static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
{
	struct intel_uncore_type *type = box->pmu->type;
	struct pci_dev *pdev = box->pci_dev;
	int where = SNB_UNCORE_PCI_IMC_BAR_OFFSET;
	resource_size_t addr;
	u32 pci_dword;

	pci_read_config_dword(pdev, where, &pci_dword);
	addr = pci_dword;

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, where + 4, &pci_dword);
	addr |= ((resource_size_t)pci_dword << 32);
#endif

	addr &= ~(PAGE_SIZE - 1);

	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);

	box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}

static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
{}

static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
{}

/*
 * Keep the custom event_init() function compatible with old event
 * encoding for free running counters.
 */
static int snb_uncore_imc_event_init(struct perf_event *event)
{
	struct intel_uncore_pmu *pmu;
	struct intel_uncore_box *box;
	struct hw_perf_event *hwc = &event->hw;
	u64 cfg = event->attr.config & SNB_UNCORE_PCI_IMC_EVENT_MASK;
	int idx, base;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	pmu = uncore_event_to_pmu(event);
	/* no device found for this pmu */
	if (pmu->func_id < 0)
		return -ENOENT;

	/* Sampling not supported yet */
	if (hwc->sample_period)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/*
	 * Place all uncore events for a particular physical package
	 * onto a single cpu
	 */
	if (event->cpu < 0)
		return -EINVAL;

	/* check only supported bits are set */
	if (event->attr.config & ~SNB_UNCORE_PCI_IMC_EVENT_MASK)
		return -EINVAL;

	box = uncore_pmu_to_box(pmu, event->cpu);
	if (!box || box->cpu < 0)
		return -EINVAL;

	event->cpu = box->cpu;
	event->pmu_private = box;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	event->hw.idx = -1;
	event->hw.last_tag = ~0ULL;
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;
	/*
	 * check event is known (whitelist, determines counter)
	 */
	switch (cfg) {
	case SNB_UNCORE_PCI_IMC_DATA_READS:
		base = SNB_UNCORE_PCI_IMC_DATA_READS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_DATA_WRITES:
		base = SNB_UNCORE_PCI_IMC_DATA_WRITES_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_GT_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_GT_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IA_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IA_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	case SNB_UNCORE_PCI_IMC_IO_REQUESTS:
		base = SNB_UNCORE_PCI_IMC_IO_REQUESTS_BASE;
		idx = UNCORE_PMC_IDX_FREERUNNING;
		break;
	default:
		return -EINVAL;
	}

	/* must be done before validate_group */
	event->hw.event_base = base;
	event->hw.idx = idx;

	/* Convert to standard encoding format for freerunning counters */
	event->hw.config = ((cfg - 1) << 8) | 0x10ff;
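	/*
	 * Editorial note, not in the upstream source: e.g. cfg 0x1
	 * (data_reads) becomes event=0xff,umask=0x10, matching the generic
	 * free-running encoding used by the other IMC PMUs in this file.
	 */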

	/* no group validation needed, we have free running counters */

	return 0;
}

static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
{
	return 0;
}

int snb_pci2phy_map_init(int devid)
{
	struct pci_dev *dev = NULL;
	struct pci2phy_map *map;
	int bus, segment;

	dev = pci_get_device(PCI_VENDOR_ID_INTEL, devid, dev);
	if (!dev)
		return -ENOTTY;

	bus = dev->bus->number;
	segment = pci_domain_nr(dev->bus);

	raw_spin_lock(&pci2phy_map_lock);
	map = __find_pci2phy_map(segment);
	if (!map) {
		raw_spin_unlock(&pci2phy_map_lock);
		pci_dev_put(dev);
		return -ENOMEM;
	}
	map->pbus_to_dieid[bus] = 0;
	raw_spin_unlock(&pci2phy_map_lock);

	pci_dev_put(dev);

	return 0;
}

static struct pmu snb_uncore_imc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= snb_uncore_imc_event_init,
	.add		= uncore_pmu_event_add,
	.del		= uncore_pmu_event_del,
	.start		= uncore_pmu_event_start,
	.stop		= uncore_pmu_event_stop,
	.read		= uncore_pmu_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
};

static struct intel_uncore_ops snb_uncore_imc_ops = {
	.init_box	= snb_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.enable_box	= snb_uncore_imc_enable_box,
	.disable_box	= snb_uncore_imc_disable_box,
	.disable_event	= snb_uncore_imc_disable_event,
	.enable_event	= snb_uncore_imc_enable_event,
	.hw_config	= snb_uncore_imc_hw_config,
	.read_counter	= uncore_mmio_read_counter,
};

static struct intel_uncore_type snb_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 1,
	.num_freerunning_types	= SNB_PCI_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size	= SNB_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning	= snb_uncore_imc_freerunning,
	.event_descs	= snb_uncore_imc_events,
	.format_group	= &snb_uncore_imc_format_group,
	.ops		= &snb_uncore_imc_ops,
	.pmu		= &snb_uncore_imc_pmu,
};

static struct intel_uncore_type *snb_pci_uncores[] = {
	[SNB_PCI_UNCORE_IMC]	= &snb_uncore_imc,
	NULL,
};

static const struct pci_device_id snb_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(SNB),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id ivb_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(IVB),
	IMC_UNCORE_DEV(IVB_E3),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id hsw_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(HSW),
	IMC_UNCORE_DEV(HSW_U),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id bdw_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(BDW),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id skl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(SKL_Y),
	IMC_UNCORE_DEV(SKL_U),
	IMC_UNCORE_DEV(SKL_HD),
	IMC_UNCORE_DEV(SKL_HQ),
	IMC_UNCORE_DEV(SKL_SD),
	IMC_UNCORE_DEV(SKL_SQ),
	IMC_UNCORE_DEV(SKL_E3),
	IMC_UNCORE_DEV(KBL_Y),
	IMC_UNCORE_DEV(KBL_U),
	IMC_UNCORE_DEV(KBL_UQ),
	IMC_UNCORE_DEV(KBL_SD),
	IMC_UNCORE_DEV(KBL_SQ),
	IMC_UNCORE_DEV(KBL_HQ),
	IMC_UNCORE_DEV(KBL_WQ),
	IMC_UNCORE_DEV(CFL_2U),
	IMC_UNCORE_DEV(CFL_4U),
	IMC_UNCORE_DEV(CFL_4H),
	IMC_UNCORE_DEV(CFL_6H),
	IMC_UNCORE_DEV(CFL_2S_D),
	IMC_UNCORE_DEV(CFL_4S_D),
	IMC_UNCORE_DEV(CFL_6S_D),
	IMC_UNCORE_DEV(CFL_8S_D),
	IMC_UNCORE_DEV(CFL_4S_W),
	IMC_UNCORE_DEV(CFL_6S_W),
	IMC_UNCORE_DEV(CFL_8S_W),
	IMC_UNCORE_DEV(CFL_4S_S),
	IMC_UNCORE_DEV(CFL_6S_S),
	IMC_UNCORE_DEV(CFL_8S_S),
	IMC_UNCORE_DEV(AML_YD),
	IMC_UNCORE_DEV(AML_YQ),
	IMC_UNCORE_DEV(WHL_UQ),
	IMC_UNCORE_DEV(WHL_4_UQ),
	IMC_UNCORE_DEV(WHL_UD),
	IMC_UNCORE_DEV(CML_H1),
	IMC_UNCORE_DEV(CML_H2),
	IMC_UNCORE_DEV(CML_H3),
	IMC_UNCORE_DEV(CML_U1),
	IMC_UNCORE_DEV(CML_U2),
	IMC_UNCORE_DEV(CML_U3),
	IMC_UNCORE_DEV(CML_S1),
	IMC_UNCORE_DEV(CML_S2),
	IMC_UNCORE_DEV(CML_S3),
	IMC_UNCORE_DEV(CML_S4),
	IMC_UNCORE_DEV(CML_S5),
	{ /* end: all zeroes */ },
};

static const struct pci_device_id icl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(ICL_U),
	IMC_UNCORE_DEV(ICL_U2),
	IMC_UNCORE_DEV(RKL_1),
	IMC_UNCORE_DEV(RKL_2),
	{ /* end: all zeroes */ },
};

static struct pci_driver snb_uncore_pci_driver = {
	.name		= "snb_uncore",
	.id_table	= snb_uncore_pci_ids,
};

static struct pci_driver ivb_uncore_pci_driver = {
	.name		= "ivb_uncore",
	.id_table	= ivb_uncore_pci_ids,
};

static struct pci_driver hsw_uncore_pci_driver = {
	.name		= "hsw_uncore",
	.id_table	= hsw_uncore_pci_ids,
};

static struct pci_driver bdw_uncore_pci_driver = {
	.name		= "bdw_uncore",
	.id_table	= bdw_uncore_pci_ids,
};

static struct pci_driver skl_uncore_pci_driver = {
	.name		= "skl_uncore",
	.id_table	= skl_uncore_pci_ids,
};

static struct pci_driver icl_uncore_pci_driver = {
	.name		= "icl_uncore",
	.id_table	= icl_uncore_pci_ids,
};

struct imc_uncore_pci_dev {
	__u32 pci_id;
	struct pci_driver *driver;
};
#define IMC_DEV(a, d) \
	{ .pci_id = PCI_DEVICE_ID_INTEL_##a, .driver = (d) }

static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(SNB_IMC, &snb_uncore_pci_driver),
	IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver),    /* 3rd Gen Core processor */
	IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
	IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core Y */
	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Dual Core */
	IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
	IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
	IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
	IMC_DEV(SKL_E3_IMC, &skl_uncore_pci_driver),  /* Xeon E3 V5 Gen Core processor */
	IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
	IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
	IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
	IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
	IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
	IMC_DEV(KBL_HQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core H Quad Core */
	IMC_DEV(KBL_WQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S 4 cores Work Station */
	IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
	IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
	IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
	IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
	IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
	IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
	IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
	IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
	IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
	IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
	IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
	IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
	IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
	IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
	IMC_DEV(AML_YD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Dual Core */
	IMC_DEV(AML_YQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core Y Mobile Quad Core */
	IMC_DEV(WHL_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_4_UQ_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Quad Core */
	IMC_DEV(WHL_UD_IMC, &skl_uncore_pci_driver),	/* 8th Gen Core U Mobile Dual Core */
	IMC_DEV(CML_H1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_H3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_U3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S1_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S2_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S3_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S4_IMC, &skl_uncore_pci_driver),
	IMC_DEV(CML_S5_IMC, &skl_uncore_pci_driver),
	IMC_DEV(ICL_U_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(ICL_U2_IMC, &icl_uncore_pci_driver),	/* 10th Gen Core Mobile */
	IMC_DEV(RKL_1_IMC, &icl_uncore_pci_driver),
	IMC_DEV(RKL_2_IMC, &icl_uncore_pci_driver),
	{  /* end marker */ }
};


#define for_each_imc_pci_id(x, t) \
	for (x = (t); (x)->pci_id; x++)

static struct pci_driver *imc_uncore_find_dev(void)
{
	const struct imc_uncore_pci_dev *p;
	int ret;

	for_each_imc_pci_id(p, desktop_imc_pci_ids) {
		ret = snb_pci2phy_map_init(p->pci_id);
		if (ret == 0)
			return p->driver;
	}
	return NULL;
}

static int imc_uncore_pci_init(void)
{
	struct pci_driver *imc_drv = imc_uncore_find_dev();

	if (!imc_drv)
		return -ENODEV;

	uncore_pci_uncores = snb_pci_uncores;
	uncore_pci_driver = imc_drv;

	return 0;
}

int snb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int ivb_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}
int hsw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int bdw_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

int skl_uncore_pci_init(void)
{
	return imc_uncore_pci_init();
}

/* end of Sandy Bridge uncore support */

/* Nehalem uncore support */
static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
}

static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
{
	wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
}

static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx < UNCORE_PMC_IDX_FIXED)
		wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
	else
		wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
}

static struct attribute *nhm_uncore_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	&format_attr_edge.attr,
	&format_attr_inv.attr,
	&format_attr_cmask8.attr,
	NULL,
};

static const struct attribute_group nhm_uncore_format_group = {
	.name = "format",
	.attrs = nhm_uncore_formats_attr,
};

static struct uncore_event_desc nhm_uncore_events[] = {
	INTEL_UNCORE_EVENT_DESC(clockticks,                "event=0xff,umask=0x00"),
	INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any,       "event=0x2f,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any,      "event=0x2c,umask=0x0f"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads,     "event=0x20,umask=0x01"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes,    "event=0x20,umask=0x02"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads,  "event=0x20,umask=0x04"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads,   "event=0x20,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes,  "event=0x20,umask=0x20"),
	{ /* end: all zeroes */ },
};

static struct intel_uncore_ops nhm_uncore_msr_ops = {
	.disable_box	= nhm_uncore_msr_disable_box,
	.enable_box	= nhm_uncore_msr_enable_box,
	.disable_event	= snb_uncore_msr_disable_event,
	.enable_event	= nhm_uncore_msr_enable_event,
	.read_counter	= uncore_msr_read_counter,
};

static struct intel_uncore_type nhm_uncore = {
	.name		= "",
	.num_counters   = 8,
	.num_boxes	= 1,
	.perf_ctr_bits	= 48,
	.fixed_ctr_bits	= 48,
	.event_ctl	= NHM_UNC_PERFEVTSEL0,
	.perf_ctr	= NHM_UNC_UNCORE_PMC0,
	.fixed_ctr	= NHM_UNC_FIXED_CTR,
	.fixed_ctl	= NHM_UNC_FIXED_CTR_CTRL,
	.event_mask	= NHM_UNC_RAW_EVENT_MASK,
	.event_descs	= nhm_uncore_events,
	.ops		= &nhm_uncore_msr_ops,
	.format_group	= &nhm_uncore_format_group,
};

static struct intel_uncore_type *nhm_msr_uncores[] = {
	&nhm_uncore,
	NULL,
};

void nhm_uncore_cpu_init(void)
{
	uncore_msr_uncores = nhm_msr_uncores;
}

/* end of Nehalem uncore support */

/* Tiger Lake MMIO uncore support */

static const struct pci_device_id tgl_uncore_pci_ids[] = {
	IMC_UNCORE_DEV(TGL_U1),
	IMC_UNCORE_DEV(TGL_U2),
	IMC_UNCORE_DEV(TGL_U3),
	IMC_UNCORE_DEV(TGL_U4),
	IMC_UNCORE_DEV(TGL_H),
	IMC_UNCORE_DEV(ADL_1),
	IMC_UNCORE_DEV(ADL_2),
	IMC_UNCORE_DEV(ADL_3),
	IMC_UNCORE_DEV(ADL_4),
	IMC_UNCORE_DEV(ADL_5),
	IMC_UNCORE_DEV(ADL_6),
	IMC_UNCORE_DEV(ADL_7),
	IMC_UNCORE_DEV(ADL_8),
	IMC_UNCORE_DEV(ADL_9),
	IMC_UNCORE_DEV(ADL_10),
	IMC_UNCORE_DEV(ADL_11),
	IMC_UNCORE_DEV(ADL_12),
	IMC_UNCORE_DEV(ADL_13),
	IMC_UNCORE_DEV(ADL_14),
	IMC_UNCORE_DEV(ADL_15),
	IMC_UNCORE_DEV(ADL_16),
	IMC_UNCORE_DEV(ADL_17),
	IMC_UNCORE_DEV(ADL_18),
	IMC_UNCORE_DEV(ADL_19),
	IMC_UNCORE_DEV(ADL_20),
	IMC_UNCORE_DEV(ADL_21),
	IMC_UNCORE_DEV(RPL_1),
	IMC_UNCORE_DEV(RPL_2),
	IMC_UNCORE_DEV(RPL_3),
	IMC_UNCORE_DEV(RPL_4),
	IMC_UNCORE_DEV(RPL_5),
	IMC_UNCORE_DEV(RPL_6),
	IMC_UNCORE_DEV(RPL_7),
	IMC_UNCORE_DEV(RPL_8),
	IMC_UNCORE_DEV(RPL_9),
	IMC_UNCORE_DEV(RPL_10),
	IMC_UNCORE_DEV(RPL_11),
	IMC_UNCORE_DEV(RPL_12),
	IMC_UNCORE_DEV(RPL_13),
	IMC_UNCORE_DEV(RPL_14),
	IMC_UNCORE_DEV(RPL_15),
	IMC_UNCORE_DEV(RPL_16),
	IMC_UNCORE_DEV(RPL_17),
	IMC_UNCORE_DEV(RPL_18),
	IMC_UNCORE_DEV(RPL_19),
	IMC_UNCORE_DEV(RPL_20),
	IMC_UNCORE_DEV(RPL_21),
	IMC_UNCORE_DEV(RPL_22),
	IMC_UNCORE_DEV(RPL_23),
	IMC_UNCORE_DEV(RPL_24),
	IMC_UNCORE_DEV(RPL_25),
	{ /* end: all zeroes */ }
};

enum perf_tgl_uncore_imc_freerunning_types {
	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
	TGL_MMIO_UNCORE_IMC_DATA_READ,
	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x5040, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x5058, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0x50A0, 0x0, 0x0, 1, 64 },
};

static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0xd840, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0xd858, 0x0, 0x0, 1, 64 },
	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xd8A0, 0x0, 0x0, 1, 64 },
};

static struct uncore_event_desc tgl_uncore_imc_events[] = {
	INTEL_UNCORE_EVENT_DESC(data_total,         "event=0xff,umask=0x10"),
	INTEL_UNCORE_EVENT_DESC(data_total.scale,   "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_total.unit,    "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_read,         "event=0xff,umask=0x20"),
	INTEL_UNCORE_EVENT_DESC(data_read.scale,   "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_read.unit,    "MiB"),

	INTEL_UNCORE_EVENT_DESC(data_write,        "event=0xff,umask=0x30"),
	INTEL_UNCORE_EVENT_DESC(data_write.scale,  "6.103515625e-5"),
	INTEL_UNCORE_EVENT_DESC(data_write.unit,   "MiB"),

	{ /* end: all zeroes */ }
};

static struct pci_dev *tgl_uncore_get_mc_dev(void)
{
	const struct pci_device_id *ids = tgl_uncore_pci_ids;
	struct pci_dev *mc_dev = NULL;

	while (ids && ids->vendor) {
		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL);
		if (mc_dev)
			return mc_dev;
		ids++;
	}

	return mc_dev;
}

#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET		0x10000
#define TGL_UNCORE_PCI_IMC_MAP_SIZE		0xe000

static void __uncore_imc_init_box(struct intel_uncore_box *box,
				  unsigned int base_offset)
{
	struct pci_dev *pdev = tgl_uncore_get_mc_dev();
	struct intel_uncore_pmu *pmu = box->pmu;
	struct intel_uncore_type *type = pmu->type;
	resource_size_t addr;
	u32 mch_bar;

	if (!pdev) {
		pr_warn("perf uncore: Cannot find matched IMC device.\n");
		return;
	}

	pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar);
	/* MCHBAR is disabled */
	if (!(mch_bar & BIT(0))) {
		pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
		return;
	}
	mch_bar &= ~BIT(0);
	addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx);
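	/*
	 * Editorial note, not in the upstream source: each memory
	 * controller's register block is TGL_UNCORE_MMIO_IMC_MEM_OFFSET
	 * (0x10000) apart, so pmu_idx selects which MC this box maps.
	 */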

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar);
	addr |= ((resource_size_t)mch_bar << 32);
#endif

	addr += base_offset;
	box->io_addr = ioremap(addr, type->mmio_map_size);
	if (!box->io_addr)
		pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
}

static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, 0);
}

static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
	.init_box	= tgl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct attribute *tgl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL
};

static const struct attribute_group tgl_uncore_imc_format_group = {
	.name = "format",
	.attrs = tgl_uncore_imc_formats_attr,
};

static struct intel_uncore_type tgl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= TGL_UNCORE_PCI_IMC_MAP_SIZE,
	.freerunning		= tgl_uncore_imc_freerunning,
	.ops			= &tgl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *tgl_mmio_uncores[] = {
	&tgl_uncore_imc_free_running,
	NULL
};

void tgl_l_uncore_mmio_init(void)
{
	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
	uncore_mmio_uncores = tgl_mmio_uncores;
}

void tgl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = tgl_mmio_uncores;
}

/* end of Tiger Lake MMIO uncore support */

/* Alder Lake MMIO uncore support */
#define ADL_UNCORE_IMC_BASE			0xd900
#define ADL_UNCORE_IMC_MAP_SIZE			0x200
#define ADL_UNCORE_IMC_CTR			0xe8
#define ADL_UNCORE_IMC_CTRL			0xd0
#define ADL_UNCORE_IMC_GLOBAL_CTL		0xc0
#define ADL_UNCORE_IMC_BOX_CTL			0xc4
#define ADL_UNCORE_IMC_FREERUNNING_BASE		0xd800
#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE	0x100

#define ADL_UNCORE_IMC_CTL_FRZ			(1 << 0)
#define ADL_UNCORE_IMC_CTL_RST_CTRL		(1 << 1)
#define ADL_UNCORE_IMC_CTL_RST_CTRS		(1 << 2)
#define ADL_UNCORE_IMC_CTL_INT			(ADL_UNCORE_IMC_CTL_RST_CTRL | \
						ADL_UNCORE_IMC_CTL_RST_CTRS)
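/*
 * Editorial note, not in the upstream source: FRZ freezes the box counters
 * in place, while the RST bits reset control and counter state at init.
 * adl_uncore_mmio_{disable,enable}_box() below toggle FRZ to stop and
 * restart counting.
 */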

static void adl_uncore_imc_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_BASE);

	/* The global control in MC1 can control both MCs. */
	if (box->io_addr && (box->pmu->pmu_idx == 1))
		writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + ADL_UNCORE_IMC_GLOBAL_CTL);
}

static void adl_uncore_mmio_disable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(ADL_UNCORE_IMC_CTL_FRZ, box->io_addr + uncore_mmio_box_ctl(box));
}

static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box)
{
	if (!box->io_addr)
		return;

	writel(0, box->io_addr + uncore_mmio_box_ctl(box));
}

static struct intel_uncore_ops adl_uncore_mmio_ops = {
	.init_box	= adl_uncore_imc_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.disable_box	= adl_uncore_mmio_disable_box,
	.enable_box	= adl_uncore_mmio_enable_box,
	.disable_event	= intel_generic_uncore_mmio_disable_event,
	.enable_event	= intel_generic_uncore_mmio_enable_event,
	.read_counter	= uncore_mmio_read_counter,
};

#define ADL_UNC_CTL_CHMASK_MASK			0x00000f00
#define ADL_UNC_IMC_EVENT_MASK			(SNB_UNC_CTL_EV_SEL_MASK | \
						 ADL_UNC_CTL_CHMASK_MASK | \
						 SNB_UNC_CTL_EDGE_DET)

static struct attribute *adl_uncore_imc_formats_attr[] = {
	&format_attr_event.attr,
	&format_attr_chmask.attr,
	&format_attr_edge.attr,
	NULL,
};

static const struct attribute_group adl_uncore_imc_format_group = {
	.name		= "format",
	.attrs		= adl_uncore_imc_formats_attr,
};

static struct intel_uncore_type adl_uncore_imc = {
	.name		= "imc",
	.num_counters   = 5,
	.num_boxes	= 2,
	.perf_ctr_bits	= 64,
	.perf_ctr	= ADL_UNCORE_IMC_CTR,
	.event_ctl	= ADL_UNCORE_IMC_CTRL,
	.event_mask	= ADL_UNC_IMC_EVENT_MASK,
	.box_ctl	= ADL_UNCORE_IMC_BOX_CTL,
	.mmio_offset	= 0,
	.mmio_map_size	= ADL_UNCORE_IMC_MAP_SIZE,
	.ops		= &adl_uncore_mmio_ops,
	.format_group	= &adl_uncore_imc_format_group,
};

enum perf_adl_uncore_imc_freerunning_types {
	ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
	ADL_MMIO_UNCORE_IMC_DATA_READ,
	ADL_MMIO_UNCORE_IMC_DATA_WRITE,
	ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
};

static struct freerunning_counters adl_uncore_imc_freerunning[] = {
	[ADL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x40, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x58, 0x0, 0x0, 1, 64 },
	[ADL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xA0, 0x0, 0x0, 1, 64 },
};

static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
{
	__uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE);
}

static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
	.init_box	= adl_uncore_imc_freerunning_init_box,
	.exit_box	= uncore_mmio_exit_box,
	.read_counter	= uncore_mmio_read_counter,
	.hw_config	= uncore_freerunning_hw_config,
};

static struct intel_uncore_type adl_uncore_imc_free_running = {
	.name			= "imc_free_running",
	.num_counters		= 3,
	.num_boxes		= 2,
	.num_freerunning_types	= ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
	.mmio_map_size		= ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
	.freerunning		= adl_uncore_imc_freerunning,
	.ops			= &adl_uncore_imc_freerunning_ops,
	.event_descs		= tgl_uncore_imc_events,
	.format_group		= &tgl_uncore_imc_format_group,
};

static struct intel_uncore_type *adl_mmio_uncores[] = {
	&adl_uncore_imc,
	&adl_uncore_imc_free_running,
	NULL
};

void adl_uncore_mmio_init(void)
{
	uncore_mmio_uncores = adl_mmio_uncores;
}

/* end of Alder Lake MMIO uncore support */