cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

cacheinfo.c (2722B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * MIPS cacheinfo support
      4 */
      5#include <linux/cacheinfo.h>
      6
/*
 * Fill one struct cacheinfo leaf from a cpuinfo_mips cache descriptor,
 * then advance @leaf to the next entry.
 *
 * @cache is a descriptor member name (dcache, icache, vcache, scache,
 * tcache) that is token-pasted into the member access c->cache.* — this
 * is why it must remain a macro rather than a function.
 *
 * NOTE: implicitly captures a variable `c` (struct cpuinfo_mips *) from
 * the expansion site, and mutates @leaf as a side effect; callers must
 * fill fields of *leaf (e.g. shared_cpu_map) BEFORE invoking this.
 */
#define populate_cache(cache, leaf, c_level, c_type)		\
do {								\
	leaf->type = c_type;					\
	leaf->level = c_level;					\
	leaf->coherency_line_size = c->cache.linesz;		\
	leaf->number_of_sets = c->cache.sets;			\
	leaf->ways_of_associativity = c->cache.ways;		\
	leaf->size = c->cache.linesz * c->cache.sets *		\
		c->cache.ways;					\
	leaf++;							\
} while (0)
     19
     20int init_cache_level(unsigned int cpu)
     21{
     22	struct cpuinfo_mips *c = &current_cpu_data;
     23	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
     24	int levels = 0, leaves = 0;
     25
     26	/*
     27	 * If Dcache is not set, we assume the cache structures
     28	 * are not properly initialized.
     29	 */
     30	if (c->dcache.waysize)
     31		levels += 1;
     32	else
     33		return -ENOENT;
     34
     35
     36	leaves += (c->icache.waysize) ? 2 : 1;
     37
     38	if (c->vcache.waysize) {
     39		levels++;
     40		leaves++;
     41	}
     42
     43	if (c->scache.waysize) {
     44		levels++;
     45		leaves++;
     46	}
     47
     48	if (c->tcache.waysize) {
     49		levels++;
     50		leaves++;
     51	}
     52
     53	this_cpu_ci->num_levels = levels;
     54	this_cpu_ci->num_leaves = leaves;
     55	return 0;
     56}
     57
     58static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
     59{
     60	int cpu1;
     61
     62	for_each_possible_cpu(cpu1)
     63		if (cpus_are_siblings(cpu, cpu1))
     64			cpumask_set_cpu(cpu1, cpu_map);
     65}
     66
     67static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
     68{
     69	int cpu1;
     70	int cluster = cpu_cluster(&cpu_data[cpu]);
     71
     72	for_each_possible_cpu(cpu1)
     73		if (cpu_cluster(&cpu_data[cpu1]) == cluster)
     74			cpumask_set_cpu(cpu1, cpu_map);
     75}
     76
/*
 * Fill the cacheinfo leaves that init_cache_level() sized, in the same
 * order (L1 D/I or unified, then vcache, scache, tcache), and mark the
 * shared-CPU maps as populated.
 *
 * NOTE(review): reads current_cpu_data yet takes @cpu as a parameter —
 * presumably this only runs on @cpu itself; confirm against the caller.
 */
int populate_cache_leaves(unsigned int cpu)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	int level = 1;

	/*
	 * Ordering matters throughout: populate_cache() advances this_leaf
	 * as a side effect, so each leaf's shared_cpu_map must be filled
	 * BEFORE the populate_cache() that consumes that leaf.
	 */
	if (c->icache.waysize) {
		/* I/D caches are per core */
		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
		populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
		populate_cache(icache, this_leaf, level, CACHE_TYPE_INST);
		level++;
	} else {
		/* No separate I-cache: a single unified level-1 leaf. */
		populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED);
		level++;
	}

	if (c->vcache.waysize) {
		/* Vcache is per core as well */
		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
		populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED);
		level++;
	}

	if (c->scache.waysize) {
		/* Scache is per cluster */
		fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
		populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED);
		level++;
	}

	/*
	 * NOTE(review): the tcache leaf's shared_cpu_map is left empty —
	 * its sharing scope is not established anywhere in this file;
	 * verify whether it should be cluster- or system-wide.
	 */
	if (c->tcache.waysize)
		populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED);

	this_cpu_ci->cpu_map_populated = true;

	return 0;
}