cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

uncore.h (18801B)


/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					UNCORE_PMC_IDX_MAX_FREERUNNING)
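
/*
 * Editorial note (not in the original header): with the values above the
 * counter index layout works out to generic counters at indices 0..7,
 * the fixed counter at index 8 (UNCORE_PMC_IDX_FIXED), the free running
 * pseudo-index at 9 (UNCORE_PMC_IDX_FREERUNNING), and UNCORE_PMC_IDX_MAX
 * evaluating to 10.
 */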

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
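
/*
 * Worked example (editorial, not in the original header): the macros above
 * pack one byte per field, so UNCORE_PCI_DEV_FULL_DATA(0x0a, 2, 3, 1)
 * yields 0x0a020301, from which the accessors recover
 * UNCORE_PCI_DEV_DEV() == 0x0a, UNCORE_PCI_DEV_FUNC() == 0x02,
 * UNCORE_PCI_DEV_TYPE() == 0x03 and UNCORE_PCI_DEV_IDX() == 0x01.
 */
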
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
struct intel_uncore_topology;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	int type_id;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	u64 *box_ctls;	/* Unit ctrl addr of the first box of each die */
	union {
		unsigned msr_offset;
		unsigned mmio_offset;
	};
	unsigned mmio_map_size;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	union {
		unsigned *msr_offsets;
		unsigned *pci_offsets;
		unsigned *mmio_offsets;
	};
	unsigned *box_ids;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	const struct attribute_group **attr_update;
	struct pmu *pmu; /* for custom pmu ops */
	/*
	 * Uncore PMU would store relevant platform topology configuration here
	 * to identify which platform component each PMON block of that type is
	 * supposed to monitor.
	 */
	struct intel_uncore_topology *topology;
	/*
	 * Optional callbacks for managing mapping of Uncore units to PMONs
	 */
	int (*get_topology)(struct intel_uncore_type *type);
	int (*set_mapping)(struct intel_uncore_type *type);
	void (*cleanup_mapping)(struct intel_uncore_type *type);
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu			pmu;
	char				name[UNCORE_PMU_NAME_LEN];
	int				pmu_idx;
	int				func_id;
	bool				registered;
	atomic_t			activeboxes;
	struct intel_uncore_type	*type;
	struct intel_uncore_box		**boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int dieid;	/* Logical die ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0		0xf70
#define CFL_UNC_CBO_7_PER_CTR0			0xf76

#define UNCORE_BOX_FLAG_INITIATED		0
/* event config registers are 8 bytes apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8		1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2

struct uncore_event_desc {
	struct device_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
	unsigned *box_offsets;
};

struct intel_uncore_topology {
	u64 configuration;
	int segment;
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_dieid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
int uncore_die_to_segment(int die);

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf);

static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
	return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}

#define to_device_attribute(n)	container_of(n, struct device_attribute, attr)
#define to_dev_ext_attribute(n)	container_of(n, struct dev_ext_attribute, attr)
#define attr_to_ext_attr(n)	to_dev_ext_attribute(to_device_attribute(n))

extern int __uncore_max_dies;
#define uncore_max_dies()	(__uncore_max_dies)

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}
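
/*
 * Illustrative usage (editorial, not in the original header): an uncore
 * driver typically fills an array of these descriptors and terminates it
 * with an empty entry, along the lines of
 *
 *	static struct uncore_event_desc example_uncore_events[] = {
 *		INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff"),
 *		{ },
 *	};
 *
 * The array name and event encoding here are hypothetical; each entry
 * becomes a sysfs attribute whose show routine prints the config string.
 */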

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct device *dev,		\
				struct device_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
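
/*
 * Illustrative usage (editorial, not in the original header):
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 *
 * expands to a show() routine that prints "config:0-7" plus a
 * 'format_attr_event' device attribute; userspace reads it via sysfs to
 * learn which bits of perf_event_attr::config hold the 'event' field.
 */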

static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
					       unsigned long offset)
{
	if (offset < box->pmu->type->mmio_map_size)
		return true;

	pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
		     offset, box->pmu->type->name);

	return false;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;
	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}


/*
 * In the uncore document, there is no event-code assigned to free running
 * counters. Some events need to be defined to indicate the free running
 * counters. The events are encoded as event-code + umask-code.
 *
 * The event-code for all free running counters is 0xff, which is the same as
 * the fixed counters.
 *
 * The umask-code is used to distinguish a fixed counter and a free running
 * counter, and different types of free running counters.
 * - For fixed counters, the umask-code is 0x0X.
 *   X indicates the index of the fixed counter, which starts from 0.
 * - For free running counters, the umask-code uses the rest of the space.
 *   It bears the format 0xXY.
 *   X stands for the type of free running counters, which starts from 1.
 *   Y stands for the index of free running counters of the same type, which
 *   starts from 0.
 *
 * For example, there are three types of IIO free running counters on Skylake
 * server: IO CLOCKS counters, BANDWIDTH counters and UTILIZATION counters.
 * The event-code for all the free running counters is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS. IO CLOCKS is the first type,
 * whose umask-code starts from 0x10.
 * So 'ioclk' is encoded as event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of BANDWIDTH counters. BANDWIDTH is
 * the second type, whose umask-code starts from 0x20.
 * So 'bw_in_port2' is encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}
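
/*
 * Worked example (editorial, not in the original header): for the
 * 'bw_in_port2' encoding above, config = 0x22ff (event=0xff,umask=0x22).
 * uncore_freerunning_type(0x22ff) = ((0x22 - 0x10) >> 4) & 0xf = 1, i.e.
 * the second (BANDWIDTH) type, and uncore_freerunning_idx(0x22ff) =
 * 0x22 & 0xf = 2, i.e. the third counter of that type.
 */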

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       (pmu->type->freerunning[type].box_offsets ?
	        pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
	        pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}
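
/*
 * Worked example (editorial, not in the original header): a fixed counter
 * encoding such as event=0xff,umask=0x00 (config = 0x00ff) fails the second
 * test above because its umask 0x00 is below UNCORE_FREERUNNING_UMASK_START
 * (0x10), while 'ioclk' (config = 0x10ff) passes both tests and is treated
 * as a free running counter.
 */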

/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		return 0;

	return -EINVAL;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);

extern struct intel_uncore_type *empty_uncore[];
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern struct pci_driver *uncore_pci_sub_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
void adl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);