cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

dcscb.c (4670B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by:	Nicolas Pitre, May 2012
 * Copyright:	(C) 2012-2013  Linaro Limited
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>
#include <linux/arm-cci.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include "vexpress.h"

#define RST_HOLD0	0x0
#define RST_HOLD1	0x4
#define SYS_SWRESET	0x8
#define RST_STAT0	0xc
#define RST_STAT1	0x10
#define EAG_CFG_R	0x20
#define EAG_CFG_W	0x24
#define KFC_CFG_R	0x28
#define KFC_CFG_W	0x2c
#define DCS_CFG_R	0x30
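
/*
 * RST_HOLDn layout, as implied by the code below (one register per
 * cluster, at RST_HOLD0 + cluster * 4): bits [3:0] hold the individual
 * CPU resets, bits [7:4] what appears to be a mirrored per-CPU reset
 * field (cleared together with [3:0] on power-up), and bit 8 holds the
 * cluster-wide reset.
 */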

static void __iomem *dcscb_base;
static int dcscb_allcpus_mask[2];

static int dcscb_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	unsigned int rst_hold, cpumask = (1 << cpu);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= 2 || !(cpumask & dcscb_allcpus_mask[cluster]))
		return -EINVAL;

	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
	rst_hold &= ~(cpumask | (cpumask << 4));
	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	return 0;
}

static int dcscb_cluster_powerup(unsigned int cluster)
{
	unsigned int rst_hold;

	pr_debug("%s: cluster %u\n", __func__, cluster);
	if (cluster >= 2)
		return -EINVAL;

	/* remove the cluster reset and hold the individual CPUs in reset */
	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
	rst_hold &= ~(1 << 8);
	rst_hold |= dcscb_allcpus_mask[cluster];
	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	return 0;
}

static void dcscb_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	unsigned int rst_hold;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= 2 || !((1 << cpu) & dcscb_allcpus_mask[cluster]));

	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
	rst_hold |= (1 << cpu);
	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
}

static void dcscb_cluster_powerdown_prepare(unsigned int cluster)
{
	unsigned int rst_hold;

	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= 2);

	rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
	rst_hold |= (1 << 8);
	writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
}

static void dcscb_cpu_cache_disable(void)
{
	/* Disable and flush the local CPU cache. */
	v7_exit_coherency_flush(louis);
}
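
/*
 * v7_exit_coherency_flush() takes either "louis" or "all": "louis"
 * flushes only down to the Level of Unification Inner Shareable, which
 * suffices when a single CPU leaves coherency, while "all" (used for
 * the last CPU of a cluster, below) flushes the whole cache hierarchy.
 */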

static void dcscb_cluster_cache_disable(void)
{
	/* Flush all cache levels for this cluster. */
	v7_exit_coherency_flush(all);

	/*
	 * A full outer cache flush could be needed at this point
	 * on platforms with such a cache, depending on where the
	 * outer cache sits. In some cases the notion of a "last
	 * cluster standing" would need to be implemented if the
	 * outer cache is shared across clusters. In any case, when
	 * the outer cache needs flushing, there is no concurrent
	 * access to the cache controller to worry about and no
	 * special locking besides what is already provided by the
	 * MCPM state machinery is needed.
	 */
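
	/*
	 * Sketch only: on a platform with an outer cache this is
	 * roughly where a call such as outer_flush_all() would sit;
	 * nothing is done here, presumably because the RTSM model this
	 * backend targets has no outer cache.
	 */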

	/*
	 * Disable cluster-level coherency by masking
	 * incoming snoops and DVM messages:
	 */
	cci_disable_port_by_cpu(read_cpuid_mpidr());
}

static const struct mcpm_platform_ops dcscb_power_ops = {
	.cpu_powerup		= dcscb_cpu_powerup,
	.cluster_powerup	= dcscb_cluster_powerup,
	.cpu_powerdown_prepare	= dcscb_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = dcscb_cluster_powerdown_prepare,
	.cpu_cache_disable	= dcscb_cpu_cache_disable,
	.cluster_cache_disable	= dcscb_cluster_cache_disable,
};
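
/*
 * Example (sketch): nothing calls these ops directly.  Once the backend
 * is registered, the generic MCPM layer drives them; the SMP boot path,
 * for instance, powers a core on with
 *
 *	mcpm_cpu_power_up(cpu, cluster);
 *
 * which lands in dcscb_cpu_powerup(), preceded by
 * dcscb_cluster_powerup() when the whole cluster was down.
 */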

/* Low-level power-up setup entry point, implemented in assembly. */
extern void dcscb_power_up_setup(unsigned int affinity_level);

static int __init dcscb_init(void)
{
	struct device_node *node;
	unsigned int cfg;
	int ret;

	if (!cci_probed())
		return -ENODEV;

	node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
	if (!node)
		return -ENODEV;
	dcscb_base = of_iomap(node, 0);
	of_node_put(node);
	if (!dcscb_base)
		return -EADDRNOTAVAIL;
	cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
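	/*
	 * DCS_CFG_R advertises the number of cores per cluster in 4-bit
	 * fields starting at bit 16: bits [19:16] for cluster 0, bits
	 * [23:20] for cluster 1.  Worked example: a value of 4 in bits
	 * [19:16] gives (1 << 4) - 1 = 0xf, i.e. a mask of CPUs 0-3.
	 */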
	dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
	dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;

	ret = mcpm_platform_register(&dcscb_power_ops);
	if (!ret)
		ret = mcpm_sync_init(dcscb_power_up_setup);
	if (ret) {
		iounmap(dcscb_base);
		return ret;
	}

	pr_info("VExpress DCSCB support installed\n");

	/*
	 * Future entries into the kernel can now go
	 * through the cluster entry vectors.
	 */
	vexpress_flags_set(__pa_symbol(mcpm_entry_point));

	return 0;
}

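/*
 * Registered as an early initcall so the MCPM backend is in place
 * before secondary CPUs are brought online during SMP boot.
 */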
early_initcall(dcscb_init);