cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

cpu_ops_sbi.c (2916B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * HSM extension and cpu_ops implementation.
      4 *
      5 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
      6 */
      7
      8#include <linux/init.h>
      9#include <linux/mm.h>
     10#include <linux/sched/task_stack.h>
     11#include <asm/cpu_ops.h>
     12#include <asm/cpu_ops_sbi.h>
     13#include <asm/sbi.h>
     14#include <asm/smp.h>
     15
/* Secondary-hart entry point; defined in assembly outside this file. */
extern char secondary_start_sbi[];
/* Forward declaration; the table itself is defined at the end of this file. */
const struct cpu_operations cpu_ops_sbi;

/*
 * Ordered booting via HSM brings one cpu at a time. However, cpu hotplug can
 * be invoked from multiple threads in parallel. Define a per cpu data
 * to handle that.
 */
static DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
     25
     26static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
     27			      unsigned long priv)
     28{
     29	struct sbiret ret;
     30
     31	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
     32			hartid, saddr, priv, 0, 0, 0);
     33	if (ret.error)
     34		return sbi_err_map_linux_errno(ret.error);
     35	else
     36		return 0;
     37}
     38
     39#ifdef CONFIG_HOTPLUG_CPU
     40static int sbi_hsm_hart_stop(void)
     41{
     42	struct sbiret ret;
     43
     44	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STOP, 0, 0, 0, 0, 0, 0);
     45
     46	if (ret.error)
     47		return sbi_err_map_linux_errno(ret.error);
     48	else
     49		return 0;
     50}
     51
     52static int sbi_hsm_hart_get_status(unsigned long hartid)
     53{
     54	struct sbiret ret;
     55
     56	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STATUS,
     57			hartid, 0, 0, 0, 0, 0);
     58	if (ret.error)
     59		return sbi_err_map_linux_errno(ret.error);
     60	else
     61		return ret.value;
     62}
     63#endif
     64
/*
 * Bring up secondary CPU @cpuid via the HSM extension.
 *
 * The per-cpu boot_data slot is filled with the idle task pointer
 * (@tidle) and the top of its kernel stack, and its physical address is
 * passed to the firmware as the private argument so the secondary hart
 * can find its stack and task when it enters secondary_start_sbi.
 * The two barriers order the writes: tidle must be fully constructed
 * before being published, and boot_data must be visible before the
 * hart is started.
 *
 * Returns 0 on success or a negative errno from sbi_hsm_hart_start().
 */
static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
	unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
	int hartid = cpuid_to_hartid_map(cpuid);
	unsigned long hsm_data;
	struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid);

	/* Make sure tidle is updated */
	smp_mb();
	bdata->task_ptr = tidle;
	bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
	/* Make sure boot data is updated */
	smp_mb();
	hsm_data = __pa(bdata);
	return sbi_hsm_hart_start(hartid, boot_addr, hsm_data);
}
     81
     82static int sbi_cpu_prepare(unsigned int cpuid)
     83{
     84	if (!cpu_ops_sbi.cpu_start) {
     85		pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
     86		return -ENODEV;
     87	}
     88	return 0;
     89}
     90
     91#ifdef CONFIG_HOTPLUG_CPU
     92static int sbi_cpu_disable(unsigned int cpuid)
     93{
     94	if (!cpu_ops_sbi.cpu_stop)
     95		return -EOPNOTSUPP;
     96	return 0;
     97}
     98
     99static void sbi_cpu_stop(void)
    100{
    101	int ret;
    102
    103	ret = sbi_hsm_hart_stop();
    104	pr_crit("Unable to stop the cpu %u (%d)\n", smp_processor_id(), ret);
    105}
    106
    107static int sbi_cpu_is_stopped(unsigned int cpuid)
    108{
    109	int rc;
    110	int hartid = cpuid_to_hartid_map(cpuid);
    111
    112	rc = sbi_hsm_hart_get_status(hartid);
    113
    114	if (rc == SBI_HSM_STATE_STOPPED)
    115		return 0;
    116	return rc;
    117}
    118#endif
    119
/*
 * cpu_ops backend that drives CPU bring-up (and, with hotplug, teardown)
 * through the SBI HSM extension.  Hotplug callbacks are only wired in
 * when CONFIG_HOTPLUG_CPU is enabled.
 */
const struct cpu_operations cpu_ops_sbi = {
	.name		= "sbi",
	.cpu_prepare	= sbi_cpu_prepare,
	.cpu_start	= sbi_cpu_start,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= sbi_cpu_disable,
	.cpu_stop	= sbi_cpu_stop,
	.cpu_is_stopped	= sbi_cpu_is_stopped,
#endif
};