cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

smp.c (6250B) - arch/csky/kernel/smp.c: C-SKY SMP bring-up, IPI handling and CPU hotplug


// SPDX-License-Identifier: GPL-2.0

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif

enum ipi_message_type {
	IPI_EMPTY,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_IRQ_WORK,
	IPI_MAX
};

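/*
 * Per-CPU IPI state. @bits is written by remote senders (set_bit() in
 * send_ipi_message()) and drained locally (xchg() in handle_ipi());
 * @stats is only ever updated by the owning CPU. The two
 * ____cacheline_aligned annotations keep the fields on separate cache
 * lines, so remote writes to @bits do not bounce the @stats line.
 */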
struct ipi_data_struct {
	unsigned long bits ____cacheline_aligned;
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
};
static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);

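/*
 * Per-CPU IPI handler: atomically snatch all pending message bits with
 * xchg(), dispatch each set bit, and loop in case new bits arrived
 * while the previous batch was being processed. The BUG_ON() catches a
 * sender setting a bit outside the known IPI_* range.
 */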
static irqreturn_t handle_ipi(int irq, void *dev)
{
	unsigned long *stats = this_cpu_ptr(&ipi_data)->stats;

	while (true) {
		unsigned long ops;

		ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
		if (ops == 0)
			return IRQ_HANDLED;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_IRQ_WORK)) {
			stats[IPI_IRQ_WORK]++;
			irq_work_run();
		}

		BUG_ON((ops >> IPI_MAX) != 0);
	}

	return IRQ_HANDLED;
}

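/*
 * The actual mechanism for raising an IPI belongs to whoever drives the
 * interrupt controller; that code registers its send function and IPI
 * irq number here during early boot, and the first registration wins.
 * A controller driver would hook in roughly like this (my_send_ipi and
 * my_ipi_irq are hypothetical names, not part of this file):
 *
 *	static void my_send_ipi(const struct cpumask *mask) { ... }
 *	set_send_ipi(my_send_ipi, my_ipi_irq);
 */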
static void (*send_arch_ipi)(const struct cpumask *mask);

static int ipi_irq;
void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
{
	if (send_arch_ipi)
		return;

	send_arch_ipi = func;
	ipi_irq = irq;
}

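/*
 * Post a message: set the per-CPU message bit for every target, then
 * raise the hardware IPI. The smp_mb() orders the set_bit() stores
 * before the interrupt is triggered, pairing with the fully ordered
 * xchg() in handle_ipi() so a receiver cannot take the interrupt
 * without also observing its message bit.
 */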
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	for_each_cpu(i, to_whom)
		set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);

	smp_mb();
	send_arch_ipi(to_whom);
}

static const char * const ipi_names[] = {
	[IPI_EMPTY]		= "Empty interrupts",
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_IRQ_WORK]		= "Irq work interrupts",
};

int arch_show_interrupts(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ",
				per_cpu_ptr(&ipi_data, cpu)->stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}

	return 0;
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

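/*
 * smp_send_stop() parks every CPU, including the caller, in ipi_stop()
 * via the cross-call machinery; the busy loop never returns, which is
 * the intended behaviour on the shutdown path.
 */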
static void ipi_stop(void *unused)
{
	while (1);
}

void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

static int ipi_dummy_dev;

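/*
 * Request the IPI irq as a per-CPU interrupt. ipi_dummy_dev appears to
 * exist only to give request_percpu_irq() a non-NULL dev_id token;
 * nothing ever reads its value.
 */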
void __init setup_smp_ipi(void)
{
	int rc;

	if (ipi_irq == 0)
		return;

	rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
				&ipi_dummy_dev);
	if (rc)
		panic("%s IRQ request failed\n", __func__);

	enable_percpu_irq(ipi_irq, 0);
}

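/*
 * Enumerate CPUs from the devicetree: every available cpu node whose
 * hardware id fits below NR_CPUS is marked possible and present.
 */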
void __init setup_smp(void)
{
	struct device_node *node = NULL;
	unsigned int cpu;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		cpu = of_get_cpu_hwid(node, 0);
		if (cpu >= NR_CPUS)
			continue;

		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
}

extern void _start_smp_secondary(void);

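/*
 * Boot "mailbox" for secondary CPUs: __cpu_up() fills these in on the
 * boot CPU and the freshly released CPU reads them from its early
 * entry path, before it can take part in normal synchronization,
 * hence the volatile qualifiers.
 */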
volatile unsigned int secondary_hint;
volatile unsigned int secondary_hint2;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;
volatile unsigned int secondary_msa1;
volatile unsigned int secondary_pgd;

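/*
 * Bring up one secondary CPU. If the target's bit is already set in
 * the SMP reset control register (cr<29, 0>), the CPU has been
 * released before (hotplug re-plug) and only needs an IPI to leave
 * arch_cpu_idle_dead(); otherwise it is released from reset here.
 */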
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	unsigned long mask = 1 << cpu;

	secondary_stack =
		(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
	secondary_hint = mfcr("cr31");
	secondary_hint2 = mfcr("cr<21, 1>");
	secondary_ccr  = mfcr("cr18");
	secondary_msa1 = read_mmu_msa1();
	secondary_pgd = mfcr("cr<29, 15>");

	/*
	 * The other CPUs are still held in reset, so flush the data
	 * cache out to memory to make sure the secondary CPU sees
	 * these values in csky_start_secondary().
	 */
	mtcr("cr17", 0x22);

	if (mask & mfcr("cr<29, 0>")) {
		send_arch_ipi(cpumask_of(cpu));
	} else {
		/* Enable the cpu in the SMP reset ctrl reg */
		mask |= mfcr("cr<29, 0>");
		mtcr("cr<29, 0>", mask);
	}

	/* Wait for the cpu to come online */
	while (!cpu_online(cpu));

	secondary_stack = 0;

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

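/*
 * C entry point for a secondary CPU, reached from _start_smp_secondary
 * with the mailbox values above. It restores the control registers,
 * adopts init_mm (mmget()/mmgrab() take the user and kernel references
 * an active_mm is expected to hold), marks itself online and enters
 * the idle loop.
 */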
void csky_start_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	mtcr("cr31", secondary_hint);
	mtcr("cr<21, 1>", secondary_hint2);
	mtcr("cr18", secondary_ccr);

	mtcr("vbr", vec_base);

	flush_tlb_all();
	write_mmu_pagemask(0);

#ifdef CONFIG_CPU_HAS_FPU
	init_fpu();
#endif

	enable_percpu_irq(ipi_irq, 0);

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("CPU%u Online: %s...\n", cpu, __func__);

	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

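/*
 * CPU hotplug: __cpu_disable() runs on the CPU being removed and takes
 * it out of the online mask; arch_cpu_idle_dead() then reports death
 * and parks in idle until a later __cpu_up() publishes a new
 * secondary_stack, at which point the CPU re-enters
 * csky_start_secondary() on that fresh stack.
 */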
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	irq_migrate_all_off_this_cpu();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: shutdown failed\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);
}

void arch_cpu_idle_dead(void)
{
	idle_task_exit();

	cpu_report_death();

	while (!secondary_stack)
		arch_cpu_idle();

	local_irq_disable();

	asm volatile(
		"mov	sp, %0\n"
		"mov	r8, %0\n"
		"jmpi	csky_start_secondary"
		:
		: "r" (secondary_stack));
}
#endif