cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

smp.c (11623B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
*/
#include <linux/types.h>
#include <linux/spinlock.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/kgdb.h>
#include <linux/sched/hotplug.h>

#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>

#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
#define smp_debug(lvl, printargs...)		\
		if (lvl >= smp_debug_lvl)	\
			printk(printargs);
#else
#define smp_debug(lvl, ...)	do { } while(0)
#endif /* DEBUG_SMP */

volatile struct task_struct *smp_init_current_idle_task;

/* track which CPU is booting */
static volatile int cpu_now_booting;

static DEFINE_PER_CPU(spinlock_t, ipi_lock);

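/* IPI message types; each value is a bit position in cpuinfo_parisc's pending_ipi word. */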
enum ipi_message_type {
	IPI_NOP=0,
	IPI_RESCHEDULE=1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST,
#ifdef CONFIG_KGDB
	IPI_ENTER_KGDB,
#endif
};


/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (cpu_online(cpuid))
	{
		switch_to_idle_task(current);
	}

	return;
}
#endif


/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	set_cpu_online(smp_processor_id(), false);
	local_irq_disable();
	__pdc_cpu_rendezvous();
	for (;;)
		;
}

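/*
 * IPI handler: drain this CPU's pending_ipi bitmask under the per-CPU
 * ipi_lock and dispatch each pending message, looping until no more
 * messages remain queued.
 */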
irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
	unsigned long ops;
	unsigned long flags;

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;

			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				inc_irq_stat(irq_resched_count);
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				inc_irq_stat(irq_call_count);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;
#ifdef CONFIG_KGDB
			case IPI_ENTER_KGDB:
				smp_debug(100, KERN_DEBUG "CPU%d ENTER_KGDB\n", this_cpu);
				kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */

			/* before doing more, let in any pending interrupts */
			if (ops) {
				local_irq_enable();
				local_irq_disable();
			}
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}

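/*
 * Queue one IPI message for a CPU: set the message bit in its pending_ipi
 * word, then write the IPI interrupt number to the CPU's HPA so the target
 * takes the interrupt.
 */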
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1 << op;
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
	spin_unlock_irqrestore(lock, flags);
}

static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu(cpu, mask)
		ipi_send(cpu, op);
}

static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	BUG_ON(dest_cpu == NO_PROC_ID);

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	preempt_disable();
	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
	preempt_enable();
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(void)
{
	send_IPI_allbutself(IPI_ENTER_KGDB);
}
#endif

inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }

void
smp_send_all_nop(void)
{
	send_IPI_allbutself(IPI_NOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC);
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void
smp_cpu_init(int cpunum)
{
	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
	extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */

	/* Set modes and Enable floating point coprocessor */
	init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_online(cpunum))	{
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	notify_cpu_starting(cpunum);

	set_cpu_online(cpunum, true);

	/* Initialise the idle task for this CPU */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
	start_cpu_itimer();
}


/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
 */
void smp_callin(unsigned long pdce_proc)
{
	int slave_id = cpu_now_booting;

#ifdef CONFIG_64BIT
	WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32
			| PAGE0->mem_pdc) != pdce_proc);
#endif

	smp_cpu_init(slave_id);

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	local_irq_enable();  /* Interrupts have been off until now */

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Bring one cpu online.
 */
static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
	long timeout;

#ifdef CONFIG_HOTPLUG_CPU
	int i;

	/* reset irq statistics for this CPU */
	memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));
	for (i = 0; i < NR_IRQS; i++) {
		struct irq_desc *desc = irq_to_desc(i);

		if (desc && desc->kstat_irqs)
			*per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
	}
#endif

	/* wait until last booting CPU has started. */
	while (cpu_now_booting)
		;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/*
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
	** is executed after receiving the rendezvous signal (an interrupt to
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			goto alive;
		}
		udelay(100);
		barrier();
	}
	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
		cpuid, timeout * 100);
	return 0;
}

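/* Mark the bootstrap CPU (the monarch) online and present before secondaries boot. */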
void __init smp_prepare_boot_cpu(void)
{
	int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;

	/* Setup BSP mappings */
	printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);

	set_cpu_online(bootstrap_processor, true);
	set_cpu_present(bootstrap_processor, true);
}



/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(ipi_lock, cpu));

	init_cpu_present(cpumask_of(0));
}


void __init smp_cpus_done(unsigned int cpu_max)
{
}


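/* Hotplug entry point: bring one secondary CPU online, as long as we are below setup_max_cpus. */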
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	if (cpu_online(cpu))
		return 0;

	if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
		return -EIO;

	return cpu_online(cpu) ? 0 : -EIO;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu = smp_processor_id();

	remove_cpu_topology(cpu);

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/* Find a new timesync master */
	if (cpu == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		pr_info("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
	}

	disable_percpu_irq(IPI_IRQ);

	irq_migrate_all_off_this_cpu();

	flush_cache_all_local();
	flush_tlb_all_local(NULL);

	/* disable all irqs, including timer irq */
	local_irq_disable();

	/* wait for next timer irq ... */
	mdelay(1000/HZ+100);

	/* ... and then clear all pending external irqs */
	set_eiem(0);
	mtctl(~0UL, CR_EIRR);
	mfctl(CR_EIRR);
	mtctl(0, CR_EIRR);
#endif
	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	pdc_cpu_rendezvous_lock();

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_info("CPU%u: is shutting down\n", cpu);

	/* set task's state to interruptible sleep */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 8:2) * HZ);

	pdc_cpu_rendezvous_unlock();
}

#ifdef CONFIG_PROC_FS
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#endif