cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hv-gpci.c (8937B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Hypervisor supplied "gpci" ("get performance counter info") performance
 * counter support
 *
 * Author: Cody P Schafer <cody@linux.vnet.ibm.com>
 * Copyright 2014 IBM Corporation.
 */

#define pr_fmt(fmt) "hv-gpci: " fmt

#include <linux/init.h>
#include <linux/perf_event.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/io.h>

#include "hv-gpci.h"
#include "hv-common.h"

/*
 * Example usage:
 *  perf stat -e 'hv_gpci/counter_info_version=3,offset=0,length=8,
 *		  secondary_index=0,starting_index=0xffffffff,request=0x10/' ...
 */

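/*
 * Each EVENT_DEFINE_RANGE_FORMAT* use below emits a format_attr_<name>
 * sysfs attribute (macros in hv-common.h) describing which bits of the
 * perf event config words carry the named field.
 */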
/* u32 */
EVENT_DEFINE_RANGE_FORMAT(request, config, 0, 31);
/* u32 */
/*
 * Note that starting_index, phys_processor_idx, sibling_part_id,
 * hw_chip_id, partition_id all refer to the same bit range. They
 * are basically aliases for the starting_index. The specific alias
 * used depends on the event. See REQUEST_IDX_KIND in hv-gpci-requests.h
 */
EVENT_DEFINE_RANGE_FORMAT(starting_index, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(phys_processor_idx, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(sibling_part_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(hw_chip_id, config, 32, 63);
EVENT_DEFINE_RANGE_FORMAT_LITE(partition_id, config, 32, 63);

/* u16 */
EVENT_DEFINE_RANGE_FORMAT(secondary_index, config1, 0, 15);
/* u8 */
EVENT_DEFINE_RANGE_FORMAT(counter_info_version, config1, 16, 23);
/* u8, bytes of data (1-8) */
EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31);
/* u32, byte offset */
EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);

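/* The single CPU on which gpci events are currently counted. */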
static cpumask_t hv_gpci_cpumask;

static struct attribute *format_attrs[] = {
	&format_attr_request.attr,
	&format_attr_starting_index.attr,
	&format_attr_phys_processor_idx.attr,
	&format_attr_sibling_part_id.attr,
	&format_attr_hw_chip_id.attr,
	&format_attr_partition_id.attr,
	&format_attr_secondary_index.attr,
	&format_attr_counter_info_version.attr,

	&format_attr_offset.attr,
	&format_attr_length.attr,
	NULL,
};

static const struct attribute_group format_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group event_group = {
	.name  = "events",
	.attrs = hv_gpci_event_attrs,
};

#define HV_CAPS_ATTR(_name, _format)				\
static ssize_t _name##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *page)				\
{								\
	struct hv_perf_caps caps;				\
	unsigned long hret = hv_perf_caps_get(&caps);		\
	if (hret)						\
		return -EIO;					\
								\
	return sprintf(page, _format, caps._name);		\
}								\
static struct device_attribute hv_caps_attr_##_name = __ATTR_RO(_name)

static ssize_t kernel_version_show(struct device *dev,
				   struct device_attribute *attr,
				   char *page)
{
	return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
}

static ssize_t cpumask_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &hv_gpci_cpumask);
}

static DEVICE_ATTR_RO(kernel_version);
static DEVICE_ATTR_RO(cpumask);

HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
HV_CAPS_ATTR(lab, "%d\n");
HV_CAPS_ATTR(collect_privileged, "%d\n");

static struct attribute *interface_attrs[] = {
	&dev_attr_kernel_version.attr,
	&hv_caps_attr_version.attr,
	&hv_caps_attr_ga.attr,
	&hv_caps_attr_expanded.attr,
	&hv_caps_attr_lab.attr,
	&hv_caps_attr_collect_privileged.attr,
	NULL,
};

static struct attribute *cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group cpumask_attr_group = {
	.attrs = cpumask_attrs,
};

static const struct attribute_group interface_group = {
	.name = "interface",
	.attrs = interface_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&format_group,
	&event_group,
	&interface_group,
	&cpumask_attr_group,
	NULL,
};

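/*
 * Scratch buffer for the H_GET_PERF_COUNTER_INFO hcall, one per CPU so
 * callers never share it; get_cpu_var() pins the caller to the CPU for
 * the duration of the request.
 */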
static DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t));

static unsigned long single_gpci_request(u32 req, u32 starting_index,
		u16 secondary_index, u8 version_in, u32 offset, u8 length,
		u64 *value)
{
	unsigned long ret;
	size_t i;
	u64 count;
	struct hv_gpci_request_buffer *arg;

	arg = (void *)get_cpu_var(hv_gpci_reqb);
	memset(arg, 0, HGPCI_REQ_BUFFER_SIZE);

	arg->params.counter_request = cpu_to_be32(req);
	arg->params.starting_index = cpu_to_be32(starting_index);
	arg->params.secondary_index = cpu_to_be16(secondary_index);
	arg->params.counter_info_version_in = version_in;

	ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
			virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
	if (ret) {
		pr_devel("hcall failed: 0x%lx\n", ret);
		goto out;
	}

	/*
	 * we verify offset and length are within the zeroed buffer at event
	 * init.
	 */
	count = 0;
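	/* Fold the requested bytes into a u64, most significant byte first. */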
	for (i = offset; i < offset + length; i++)
		count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);

	*value = count;
out:
	put_cpu_var(hv_gpci_reqb);
	return ret;
}

static u64 h_gpci_get_value(struct perf_event *event)
{
	u64 count;
	unsigned long ret = single_gpci_request(event_get_request(event),
					event_get_starting_index(event),
					event_get_secondary_index(event),
					event_get_counter_info_version(event),
					event_get_offset(event),
					event_get_length(event),
					&count);
	if (ret)
		return 0;
	return count;
}

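/* Re-read the counter and accumulate the delta since the previous read. */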
static void h_gpci_event_update(struct perf_event *event)
{
	s64 prev;
	u64 now = h_gpci_get_value(event);
	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

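/* Take an initial reading so later updates report only the delta. */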
static void h_gpci_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, h_gpci_get_value(event));
}

static void h_gpci_event_stop(struct perf_event *event, int flags)
{
	h_gpci_event_update(event);
}

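/* No hardware state to program on add; just take a baseline if asked to start. */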
static int h_gpci_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		h_gpci_event_start(event, flags);

	return 0;
}

static int h_gpci_event_init(struct perf_event *event)
{
	u64 count;
	u8 length;

	/* Not our event */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* config2 is unused */
	if (event->attr.config2) {
		pr_devel("config2 set when reserved\n");
		return -EINVAL;
	}

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	length = event_get_length(event);
	if (length < 1 || length > 8) {
		pr_devel("length invalid\n");
		return -EINVAL;
	}

	/* last byte within the buffer? */
	if ((event_get_offset(event) + length) > HGPCI_MAX_DATA_BYTES) {
		pr_devel("request outside of buffer: %zu > %zu\n",
				(size_t)event_get_offset(event) + length,
				HGPCI_MAX_DATA_BYTES);
		return -EINVAL;
	}

	/* check if the request works... */
	if (single_gpci_request(event_get_request(event),
				event_get_starting_index(event),
				event_get_secondary_index(event),
				event_get_counter_info_version(event),
				event_get_offset(event),
				length,
				&count)) {
		pr_devel("gpci hcall failed\n");
		return -EINVAL;
	}

	return 0;
}

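/*
 * Counting-only PMU: exclude_* bits are refused via PERF_PMU_CAP_NO_EXCLUDE,
 * and sampling is disabled at init time (PERF_PMU_CAP_NO_INTERRUPT).
 */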
static struct pmu h_gpci_pmu = {
	.task_ctx_nr = perf_invalid_context,

	.name = "hv_gpci",
	.attr_groups = attr_groups,
	.event_init  = h_gpci_event_init,
	.add         = h_gpci_event_add,
	.del         = h_gpci_event_stop,
	.start       = h_gpci_event_start,
	.stop        = h_gpci_event_stop,
	.read        = h_gpci_event_update,
	.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};

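/*
 * gpci events are system-wide, so they are counted on a single CPU: the
 * first CPU to come online claims them, and the offline handler migrates
 * them to a surviving CPU.
 */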
static int ppc_hv_gpci_cpu_online(unsigned int cpu)
{
	if (cpumask_empty(&hv_gpci_cpumask))
		cpumask_set_cpu(cpu, &hv_gpci_cpumask);

	return 0;
}

static int ppc_hv_gpci_cpu_offline(unsigned int cpu)
{
	int target;

	/* Check if exiting cpu is used for collecting gpci events */
	if (!cpumask_test_and_clear_cpu(cpu, &hv_gpci_cpumask))
		return 0;

	/* Find a new cpu to collect gpci events */
	target = cpumask_last(cpu_active_mask);

	if (target < 0 || target >= nr_cpu_ids) {
		pr_err("no active CPU available to collect gpci events\n");
		return -1;
	}

	/* Migrate gpci events to the new target */
	cpumask_set_cpu(target, &hv_gpci_cpumask);
	perf_pmu_migrate_context(&h_gpci_pmu, cpu, target);

	return 0;
}

static int hv_gpci_cpu_hotplug_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
			  "perf/powerpc/hv_gpci:online",
			  ppc_hv_gpci_cpu_online,
			  ppc_hv_gpci_cpu_offline);
}

static int hv_gpci_init(void)
{
	int r;
	unsigned long hret;
	struct hv_perf_caps caps;

	hv_gpci_assert_offsets_correct();

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		pr_debug("not a virtualized system, not enabling\n");
		return -ENODEV;
	}

	hret = hv_perf_caps_get(&caps);
	if (hret) {
		pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
				hret);
		return -ENODEV;
	}

	/* init cpuhotplug */
	r = hv_gpci_cpu_hotplug_init();
	if (r)
		return r;

	/* sampling not supported */
	h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
	if (r)
		return r;

	return 0;
}

device_initcall(hv_gpci_init);