cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

smp.c (3638B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SMP support for BPA machines.
 *
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * Plus various changes from other IBM teams...
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/cputhreads.h>
#include <asm/code-patching.h>

#include "interrupt.h"
#include <asm/udbg.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
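/*
 * The DBG() macro above only produces output when DEBUG is defined; the
 * #undef DEBUG at the top keeps these early-boot udbg messages off by default.
 */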

/*
 * The Primary thread of each non-boot processor was started from the OF client
 * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
 */
static cpumask_t of_spin_map;

/**
 * smp_startup_cpu() - start the given cpu
 *
 * At boot time, there is nothing to do for primary threads which were
 * started from Open Firmware.  For anything else, call RTAS with the
 * appropriate start location.
 *
 * Returns:
 *	0	- failure
 *	1	- success
 */
static inline int smp_startup_cpu(unsigned int lcpu)
{
	int status;
	unsigned long start_here =
			__pa(ppc_function_entry(generic_secondary_smp_init));
	unsigned int pcpu;
	int start_cpu;

	if (cpumask_test_cpu(lcpu, &of_spin_map))
		/* Already started by OF and sitting in spin loop */
		return 1;

	pcpu = get_hard_smp_processor_id(lcpu);
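	/*
	 * RTAS identifies cpus by their hardware (physical) id rather than
	 * the Linux logical number, hence the translation above.
	 */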

	/*
	 * If the RTAS start-cpu token does not exist then presume the
	 * cpu is already spinning.
	 */
	start_cpu = rtas_token("start-cpu");
	if (start_cpu == RTAS_UNKNOWN_SERVICE)
		return 1;

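	/*
	 * start-cpu takes three inputs (the hardware cpu id, the real-mode
	 * entry point, and a value handed to the new cpu in r3) and returns
	 * a single status word.
	 */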
	status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu);
	if (status != 0) {
		printk(KERN_ERR "start-cpu failed: %i\n", status);
		return 0;
	}

	return 1;
}

static void smp_cell_setup_cpu(int cpu)
{
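	/*
	 * The boot cpu's Internal Interrupt Controller was already set up
	 * during platform interrupt init; only secondaries need it here.
	 */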
	if (cpu != boot_cpuid)
		iic_setup_cpu();

	/*
	 * change default DABRX to allow user watchpoints
	 */
	mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
}

static int smp_cell_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	if (!smp_startup_cpu(nr))
		return -ENOENT;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca_ptrs[nr]->cpu_start = 1;

	return 0;
}

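/*
 * Cell plugs into the generic PowerPC SMP layer here: IPIs are sent and
 * requested through the Cell Internal Interrupt Controller, and kick_cpu
 * releases a secondary from its spin loop.
 */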
static struct smp_ops_t bpa_iic_smp_ops = {
	.message_pass	= iic_message_pass,
	.probe		= iic_request_IPIs,
	.kick_cpu	= smp_cell_kick_cpu,
	.setup_cpu	= smp_cell_setup_cpu,
	.cpu_bootable	= smp_generic_cpu_bootable,
};

/* This is called very early */
void __init smp_init_cell(void)
{
	int i;

	DBG(" -> smp_init_cell()\n");

	smp_ops = &bpa_iic_smp_ops;

	/* Mark threads which are still spinning in hold loops. */
	if (cpu_has_feature(CPU_FTR_SMT)) {
		for_each_present_cpu(i) {
			if (cpu_thread_in_core(i) == 0)
				cpumask_set_cpu(i, &of_spin_map);
		}
	} else
		cpumask_copy(&of_spin_map, cpu_present_mask);

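	/* The boot cpu is already running Linux and was never left spinning. */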
	cpumask_clear_cpu(boot_cpuid, &of_spin_map);

	/*
	 * Non-lpar (bare-metal) systems additionally need take/give timebase:
	 * each secondary synchronizes its timebase with the boot cpu through
	 * the RTAS freeze-time-base service.
	 */
	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
		smp_ops->give_timebase = rtas_give_timebase;
		smp_ops->take_timebase = rtas_take_timebase;
	}

	DBG(" <- smp_init_cell()\n");
}