cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cacheinfo.c (17605B)


// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

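/*
 * With devicetree, two leaves are considered shared when they refer to
 * the same device_node: cache_setup_of_node() below stores the node
 * pointer in fw_token, so a plain pointer comparison is sufficient.
 */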
#ifdef CONFIG_OF
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->fw_token == this_leaf->fw_token;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};
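
/*
 * Illustrative devicetree node (not from this tree) using the unified
 * properties from slot 0 above, e.g. for a hypothetical 512 KiB L2:
 *
 *	l2: l2-cache {
 *		compatible = "cache";
 *		cache-unified;
 *		cache-size = <0x80000>;
 *		cache-line-size = <64>;
 *		cache-sets = <1024>;
 *	};
 */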
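/*
 * enum cache_type uses CACHE_TYPE_INST = BIT(0) and CACHE_TYPE_DATA =
 * BIT(1), so those values (1 and 2) double as indices into
 * cache_type_info[]; only CACHE_TYPE_UNIFIED needs remapping to slot 0.
 */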
static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	of_property_read_u32(np, propname, &this_leaf->size);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
				struct device_node *np)
{
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		int ret;
		u32 line_size;
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		ret = of_property_read_u32(np, propname, &line_size);
		if (!ret) {
			this_leaf->coherency_line_size = line_size;
			break;
		}
	}
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
	const char *propname;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}

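/*
 * ways = (size / nr_sets) / line_size; e.g. for the hypothetical L2
 * above, (524288 / 1024) / 64 = 8-way set associative. nr_sets == 1
 * means fully associative, for which ways_of_associativity stays 0.
 */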
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
				  struct device_node *np)
{
	return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
			       struct device_node *np)
{
	/*
	 * init_cache_level must setup the cache level correctly
	 * overriding the architecturally specified levels, so
	 * if type is NONE at this stage, it should be unified
	 */
	if (this_leaf->type == CACHE_TYPE_NOCACHE &&
	    cache_node_is_unified(this_leaf, np))
		this_leaf->type = CACHE_TYPE_UNIFIED;
	cache_size(this_leaf, np);
	cache_get_line_size(this_leaf, np);
	cache_nr_sets(this_leaf, np);
	cache_associativity(this_leaf);
}

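/*
 * Walk the leaves in level order: level 1 properties live on the CPU
 * node itself, while deeper levels are reached by following the
 * "next-level-cache" chain via of_find_next_cache_node().
 */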
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if fw_token is already populated */
	if (this_cpu_ci->info_list->fw_token) {
		return 0;
	}

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np);/* cpu node itself */
		if (!np)
			break;
		cache_of_set_props(this_leaf, np);
		this_leaf->fw_token = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT/ACPI systems, assume unique level 1 caches, system-wide
	 * shared caches for all other levels. This will be used only if
	 * arch specific code has not populated shared_cpu_map
	 */
	return !(this_leaf->level == 1);
}
#endif

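/*
 * __weak default; architectures with an ACPI PPTT (e.g. arm64, via
 * drivers/acpi/pptt.c) provide a strong definition that fills in the
 * leaves from the firmware tables.
 */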
int __weak cache_setup_acpi(unsigned int cpu)
{
	return -ENOTSUPP;
}

unsigned int coherency_max_size;

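/*
 * Fill shared_cpu_map symmetrically: whenever leaf `index` of this CPU
 * and of an online sibling share fw_token, each CPU is added to the
 * other's mask. E.g. with per-CPU L1 caches and one L2 shared by CPUs
 * 0-3, each L1 leaf ends up with a single bit set and the L2 leaf with
 * bits 0-3.
 */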
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		ret = cache_setup_acpi(cpu);

	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;/* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
		/* record the maximum cache line size */
		if (this_leaf->coherency_line_size > coherency_max_size)
			coherency_max_size = this_leaf->coherency_line_size;
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		if (of_have_populated_dt())
			of_node_put(this_leaf->fw_token);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
	cache_leaves(cpu) = 0;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

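/*
 * Detection runs in three steps: the arch-provided init_cache_level()
 * sets the leaf count, populate_cache_leaves() fills the per-leaf
 * records, and cache_shared_cpu_map_setup() adds firmware properties
 * and sharing masks for anything still missing.
 */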
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	/*
	 * populate_cache_leaves() may completely setup the cache leaves and
	 * shared_cpu_map or it may leave it partially setup.
	 */
	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, fw_token
	 * and shared_cpu_map will be set up here only if they are
	 * not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

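/*
 * Generate trivial sysfs show callbacks; e.g. show_one(level, level)
 * expands to a level_show() that prints this_leaf->level, backing
 * /sys/devices/system/cpu/cpuX/cache/indexY/level.
 */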
#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sysfs_emit(buf, "%u\n", this_leaf->object);	\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pb\n", nr_cpu_ids, mask);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return sysfs_emit(buf, "%*pbl\n", nr_cpu_ids, mask);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const char *output;

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		output = "Data";
		break;
	case CACHE_TYPE_INST:
		output = "Instruction";
		break;
	case CACHE_TYPE_UNIFIED:
		output = "Unified";
		break;
	default:
		return -EINVAL;
	}

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	const char *output;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		output = "ReadWriteAllocate";
	else if (ci_attr & CACHE_READ_ALLOCATE)
		output = "ReadAllocate";
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		output = "WriteAllocate";
	else
		return 0;

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sysfs_emit(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sysfs_emit(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

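/*
 * Hide attributes whose backing field was never detected (still zero,
 * or an empty cpumask), so sysfs only exposes files with meaningful
 * values; ways_of_associativity is keyed on size instead, since 0
 * there legitimately means fully associative.
 */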
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

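/*
 * Create one "indexN" device per detected leaf under the per-CPU
 * "cache" device, e.g. /sys/devices/system/cpu/cpu0/cache/index1/size
 * for CPU 0's second leaf (on many systems the L1 instruction cache;
 * the ordering is architecture-specific).
 */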
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		if (this_leaf->type == CACHE_TYPE_NOCACHE)
			break;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

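/*
 * Register with the CPU hotplug state machine: cacheinfo_cpu_online()
 * runs for every CPU already up and for each one brought up later,
 * and cacheinfo_cpu_pre_down() tears the sysfs nodes down before a
 * CPU goes offline.
 */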
static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
				 "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);