cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cbe_regs.c (6743B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * cbe_regs.c
 *
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/pgtable.h>

#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/cell-regs.h>

/*
 * Current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than call of_get_cpu_node(). If
 * we implement cpu hotplug, we'll have to install an appropriate notifier
 * in order to release references to the cpu going away
 */
static struct cbe_regs_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_iic_regs __iomem *iic_regs;
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
	struct cbe_pmd_shadow_regs pmd_shadow_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;

static struct cbe_thread_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_regs_map *regs;
	unsigned int thread_id;
	unsigned int cbe_id;
} cbe_thread_map[NR_CPUS];

static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };

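/*
 * Resolve a device node to its cbe_regs_map: cpu and be nodes are looked
 * up directly in the map array, while spe nodes walk up the tree to their
 * parent cpu/be node and cache the result in np->data.
 */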
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
	int i;
	struct device_node *tmp_np;

	if (!of_node_is_type(np, "spe")) {
		for (i = 0; i < cbe_regs_map_count; i++)
			if (cbe_regs_maps[i].cpu_node == np ||
			    cbe_regs_maps[i].be_node == np)
				return &cbe_regs_maps[i];
		return NULL;
	}

	if (np->data)
		return np->data;

	/* walk up path until cpu or be node was found */
	tmp_np = np;
	do {
		tmp_np = tmp_np->parent;
		/* on a correct devicetree we won't get up to root */
		BUG_ON(!tmp_np);
	} while (!of_node_is_type(tmp_np, "cpu") &&
		 !of_node_is_type(tmp_np, "be"));

	np->data = cbe_find_map(tmp_np);

	return np->data;
}

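/*
 * Accessors for the mapped register blocks, either by device node or by
 * logical CPU number; each returns NULL if no map was found.
 */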
struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);

struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);

struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}

struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}

struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);

u32 cbe_get_hw_thread_id(int cpu)
{
	return cbe_thread_map[cpu].thread_id;
}
EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);

u32 cbe_cpu_to_node(int cpu)
{
	return cbe_thread_map[cpu].cbe_id;
}
EXPORT_SYMBOL_GPL(cbe_cpu_to_node);

u32 cbe_node_to_cpu(int node)
{
	return cpumask_first(&cbe_local_mask[node]);
}
EXPORT_SYMBOL_GPL(cbe_node_to_cpu);

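/*
 * Find the "be" node whose "cpus" phandle list references the given
 * logical CPU's device node.
 */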
static struct device_node *__init cbe_get_be_node(int cpu_id)
{
	struct device_node *np;

	for_each_node_by_type(np, "be") {
		int len, i;
		const phandle *cpu_handle;

		cpu_handle = of_get_property(np, "cpus", &len);

		/*
		 * the CAB SLOF tree is non-compliant, so we just assume
		 * there is only one node
		 */
		if (WARN_ON_ONCE(!cpu_handle))
			return np;

		/* len is in bytes; iterate over the phandle entries */
		for (i = 0; i < len / sizeof(phandle); i++)
			if (of_find_node_by_phandle(cpu_handle[i]) == of_get_cpu_node(cpu_id, NULL))
				return np;
	}

	return NULL;
}

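/*
 * Map the MMIO register blocks for one CBE chip: from the children of the
 * "be" node when one exists, otherwise from the legacy "pervasive", "iic"
 * and "mic-tm" address properties on the cpu node.
 */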
static void __init cbe_fill_regs_map(struct cbe_regs_map *map)
{
	if (map->be_node) {
		struct device_node *be, *np;

		be = map->be_node;

		for_each_node_by_type(np, "pervasive")
			if (of_get_parent(np) == be)
				map->pmd_regs = of_iomap(np, 0);

		for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller")
			if (of_get_parent(np) == be)
				map->iic_regs = of_iomap(np, 2);

		for_each_node_by_type(np, "mic-tm")
			if (of_get_parent(np) == be)
				map->mic_tm_regs = of_iomap(np, 0);
	} else {
		struct device_node *cpu;
		/* That hack must die die die ! */
		const struct address_prop {
			unsigned long address;
			unsigned int len;
		} __attribute__((packed)) *prop;

		cpu = map->cpu_node;

		prop = of_get_property(cpu, "pervasive", NULL);
		if (prop != NULL)
			map->pmd_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "iic", NULL);
		if (prop != NULL)
			map->iic_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "mic-tm", NULL);
		if (prop != NULL)
			map->mic_tm_regs = ioremap(prop->address, prop->len);
	}
}

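/*
 * Build the per-thread and per-chip maps from the device tree and map the
 * register blocks for each CBE chip found.
 */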
void __init cbe_regs_init(void)
{
	int i;
	unsigned int thread_id;
	struct device_node *cpu;

	/* Build local fast map of CPUs */
	for_each_possible_cpu(i) {
		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
		cbe_thread_map[i].be_node = cbe_get_be_node(i);
		cbe_thread_map[i].thread_id = thread_id;
	}

	/* Find maps for each device tree CPU */
	for_each_node_by_type(cpu, "cpu") {
		struct cbe_regs_map *map;
		unsigned int cbe_id;

		cbe_id = cbe_regs_map_count++;
		map = &cbe_regs_maps[cbe_id];

		if (cbe_regs_map_count > MAX_CBE) {
			printk(KERN_ERR "cbe_regs: More BE chips than supported!\n");
			cbe_regs_map_count--;
			of_node_put(cpu);
			return;
		}
		map->cpu_node = cpu;

		for_each_possible_cpu(i) {
			struct cbe_thread_map *thread = &cbe_thread_map[i];

			if (thread->cpu_node == cpu) {
				thread->regs = map;
				thread->cbe_id = cbe_id;
				map->be_node = thread->be_node;
				cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
				if (thread->thread_id == 0)
					cpumask_set_cpu(i, &cbe_first_online_cpu);
			}
		}

		cbe_fill_regs_map(map);
	}
}