cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

smp.c (13699B)


/*
 * Xtensa SMP support functions.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 - 2013 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com>
 * Pete Delaney <piet@tensilica.com>
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/thread_info.h>

#include <asm/cacheflush.h>
#include <asm/coprocessor.h>
#include <asm/kdebug.h>
#include <asm/mmu_context.h>
#include <asm/mxregs.h>
#include <asm/platform.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

#ifdef CONFIG_SMP
# if XCHAL_HAVE_S32C1I == 0
#  error "The S32C1I option is required for SMP."
# endif
#endif

static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size);
static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size);

/* IPI (Inter-Processor Interrupt) */

#define IPI_IRQ	0

static irqreturn_t ipi_interrupt(int irq, void *dev_id);

void ipi_init(void)
{
	unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
	if (request_irq(irq, ipi_interrupt, IRQF_PERCPU, "ipi", NULL))
		pr_err("Failed to request irq %u (ipi)\n", irq);
}

static inline unsigned int get_core_count(void)
{
	/* Bits 18..21 of SYSCFGID contain the core count minus 1. */
	unsigned int syscfgid = get_er(SYSCFGID);
	return ((syscfgid >> 18) & 0xf) + 1;
}

static inline int get_core_id(void)
{
	/* Bits 0..13 of SYSCFGID contain the core id. */
	unsigned int core_id = get_er(SYSCFGID);
	return core_id & 0x3fff;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned i;

	for_each_possible_cpu(i)
		set_cpu_present(i, true);
}

void __init smp_init_cpus(void)
{
	unsigned i;
	unsigned int ncpus = get_core_count();
	unsigned int core_id = get_core_id();

	pr_info("%s: Core Count = %d\n", __func__, ncpus);
	pr_info("%s: Core Id = %d\n", __func__, core_id);

	if (ncpus > NR_CPUS) {
		ncpus = NR_CPUS;
		pr_info("%s: limiting core count to %d\n", __func__, ncpus);
	}

	for (i = 0; i < ncpus; ++i)
		set_cpu_possible(i, true);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();
	BUG_ON(cpu != 0);
	cpu_asid_cache(cpu) = ASID_USER_FIRST;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
static DECLARE_COMPLETION(cpu_running);

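/*
 * C entry point for a secondary core: set up the MMU and trap handlers,
 * adopt init_mm, calibrate the delay loop, bring up per-CPU IRQs and the
 * local timer, mark the CPU online and signal __cpu_up() through the
 * cpu_running completion before entering the idle loop.
 */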
void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	init_mmu();

#ifdef CONFIG_DEBUG_MISC
	if (boot_secondary_processors == 0) {
		pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
			__func__, boot_secondary_processors, cpu);
		for (;;)
			__asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
	}

	pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
		__func__, boot_secondary_processors, cpu);
#endif
	/* Init EXCSAVE1 */

	secondary_trap_init();

	/* All kernel threads share the same mm context. */

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	enter_lazy_tlb(mm, current);

	trace_hardirqs_off();

	calibrate_delay();

	notify_cpu_starting(cpu);

	secondary_init_irq();
	local_timer_setup(cpu);

	set_cpu_online(cpu, true);

	local_irq_enable();

	complete(&cpu_running);

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

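/*
 * MPSCORE holds one RunStall bit per core: mx_cpu_start() clears the
 * target core's bit so it may run, mx_cpu_stop() sets it to stall the
 * core again.  Both are run on CPU 0 via smp_call_function_single().
 */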
static void mx_cpu_start(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
			__func__, cpu, run_stall_mask, get_er(MPSCORE));
}

static void mx_cpu_stop(void *p)
{
	unsigned cpu = (unsigned)p;
	unsigned long run_stall_mask = get_er(MPSCORE);

	set_er(run_stall_mask | (1u << cpu), MPSCORE);
	pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
			__func__, cpu, run_stall_mask, get_er(MPSCORE));
}

#ifdef CONFIG_HOTPLUG_CPU
unsigned long cpu_start_id __cacheline_aligned;
#endif
unsigned long cpu_start_ccount;

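/*
 * Boot handshake: once the target core's RunStall bit has been cleared,
 * cpu_start_ccount is published with a non-zero cycle count and the woken
 * core is expected to clear it (see the memw pairing comments below) to
 * acknowledge that it has started.  If it is still set when the timeout
 * expires, the core is stalled again and -EIO is returned.
 */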
static int boot_secondary(unsigned int cpu, struct task_struct *ts)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned long ccount;
	int i;

#ifdef CONFIG_HOTPLUG_CPU
	WRITE_ONCE(cpu_start_id, cpu);
	/* Pairs with the third memw in the cpu_restart */
	mb();
	system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
					     sizeof(cpu_start_id));
#endif
	smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);

	for (i = 0; i < 2; ++i) {
		do
			ccount = get_ccount();
		while (!ccount);

		WRITE_ONCE(cpu_start_ccount, ccount);

		do {
			/*
			 * Pairs with the first two memws in the
			 * .Lboot_secondary.
			 */
			mb();
			ccount = READ_ONCE(cpu_start_ccount);
		} while (ccount && time_before(jiffies, timeout));

		if (ccount) {
			smp_call_function_single(0, mx_cpu_stop,
						 (void *)cpu, 1);
			WRITE_ONCE(cpu_start_ccount, 0);
			return -EIO;
		}
	}
	return 0;
}

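/*
 * Bring a secondary CPU up: hand it the idle task's initial stack via
 * start_info, kick it with boot_secondary() and then wait up to one
 * second for secondary_start_kernel() to complete cpu_running and mark
 * the CPU online.
 */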
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret = 0;

	if (cpu_asid_cache(cpu) == 0)
		cpu_asid_cache(cpu) = ASID_USER_FIRST;

	start_info.stack = (unsigned long)task_pt_regs(idle);
	wmb();

	pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
			__func__, cpu, idle, start_info.stack);

	init_completion(&cpu_running);
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		wait_for_completion_timeout(&cpu_running,
				msecs_to_jiffies(1000));
		if (!cpu_online(cpu))
			ret = -EIO;
	}

	if (ret)
		pr_err("CPU %u failed to boot\n", cpu);

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

#if XTENSA_HAVE_COPROCESSORS
	/*
	 * Flush coprocessor contexts that are active on the current CPU.
	 */
	local_coprocessors_flush_release_all();
#endif
	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	local_flush_cache_all();
	local_flush_tlb_all();
	invalidate_page_directory();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static void platform_cpu_kill(unsigned int cpu)
{
	smp_call_function_single(0, mx_cpu_stop, (void *)cpu, true);
}

/*
 * Called on the thread which is asking for a CPU to be shut down;
 * waits until shutdown has completed, or times out.
 */
void __cpu_die(unsigned int cpu)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	while (time_before(jiffies, timeout)) {
		system_invalidate_dcache_range((unsigned long)&cpu_start_id,
					       sizeof(cpu_start_id));
		/* Pairs with the second memw in the cpu_restart */
		mb();
		/* The dying CPU acknowledges by negating cpu_start_id. */
		if (READ_ONCE(cpu_start_id) == -cpu) {
			platform_cpu_kill(cpu);
			return;
		}
	}
	pr_err("CPU%u: unable to kill\n", cpu);
}

void arch_cpu_idle_dead(void)
{
	cpu_die();
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	idle_task_exit();
	local_irq_disable();
	__asm__ __volatile__(
			"	movi	a2, cpu_restart\n"
			"	jx	a2\n");
}

#endif /* CONFIG_HOTPLUG_CPU */

enum ipi_msg_type {
	IPI_RESCHEDULE = 0,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

static const struct {
	const char *short_text;
	const char *long_text;
} ipi_text[] = {
	{ .short_text = "RES", .long_text = "Rescheduling interrupts" },
	{ .short_text = "CAL", .long_text = "Function call interrupts" },
	{ .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
};

struct ipi_data {
	unsigned long ipi_count[IPI_MAX];
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);

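/*
 * An IPI is raised by writing a bitmask of target cores to the MIPISET
 * register for the chosen message type; the receiving core picks the
 * message up from its MIPICAUSE register in ipi_interrupt() below.
 */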
static void send_ipi_message(const struct cpumask *callmask,
		enum ipi_msg_type msg_id)
{
	int index;
	unsigned long mask = 0;

	for_each_cpu(index, callmask)
		mask |= 1 << index;

	set_er(mask, MIPISET(msg_id));
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	send_ipi_message(&targets, IPI_CPU_STOP);
}

static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);
	machine_halt();
}

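/*
 * IPI handler: read the pending message bits from this core's MIPICAUSE
 * register, acknowledge them by writing them back, and dispatch each
 * message type, looping until no bits remain pending.
 */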
irqreturn_t ipi_interrupt(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

	for (;;) {
		unsigned int msg;

		msg = get_er(MIPICAUSE(cpu));
		set_er(msg, MIPICAUSE(cpu));

		if (!msg)
			break;

		if (msg & (1 << IPI_CALL_FUNC)) {
			++ipi->ipi_count[IPI_CALL_FUNC];
			generic_smp_call_function_interrupt();
		}

		if (msg & (1 << IPI_RESCHEDULE)) {
			++ipi->ipi_count[IPI_RESCHEDULE];
			scheduler_ipi();
		}

		if (msg & (1 << IPI_CPU_STOP)) {
			++ipi->ipi_count[IPI_CPU_STOP];
			ipi_cpu_stop(cpu);
		}
	}

	return IRQ_HANDLED;
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu;
	unsigned i;

	for (i = 0; i < IPI_MAX; ++i) {
		seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
		for_each_online_cpu(cpu)
			seq_printf(p, " %10lu",
					per_cpu(ipi_data, cpu).ipi_count[i]);
		seq_printf(p, "   %s\n", ipi_text[i].long_text);
	}
}

int setup_profiling_timer(unsigned int multiplier)
{
	pr_debug("setup_profiling_timer %d\n", multiplier);
	return 0;
}

/* TLB flush functions */

struct flush_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

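/*
 * The SMP variants of the flush_* entry points below broadcast the
 * operation with on_each_cpu(), packing any arguments into a flush_data
 * structure on the caller's stack, so that the corresponding
 * local_flush_* routine runs on every online CPU.
 */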
static void ipi_flush_tlb_all(void *arg)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

static void ipi_flush_tlb_mm(void *arg)
{
	local_flush_tlb_mm(arg);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_mm, mm, 1);
}

static void ipi_flush_tlb_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = addr,
	};
	on_each_cpu(ipi_flush_tlb_page, &fd, 1);
}

static void ipi_flush_tlb_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
}

static void ipi_flush_tlb_kernel_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
}

/* Cache flush functions */

static void ipi_flush_cache_all(void *arg)
{
	local_flush_cache_all();
}

void flush_cache_all(void)
{
	on_each_cpu(ipi_flush_cache_all, NULL, 1);
}

static void ipi_flush_cache_page(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_page(struct vm_area_struct *vma,
		     unsigned long address, unsigned long pfn)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = address,
		.addr2 = pfn,
	};
	on_each_cpu(ipi_flush_cache_page, &fd, 1);
}

static void ipi_flush_cache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_cache_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.vma = vma,
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_cache_range, &fd, 1);
}

static void ipi_flush_icache_range(void *arg)
{
	struct flush_data *fd = arg;
	local_flush_icache_range(fd->addr1, fd->addr2);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = end,
	};
	on_each_cpu(ipi_flush_icache_range, &fd, 1);
}
EXPORT_SYMBOL(flush_icache_range);

/* ------------------------------------------------------------------------- */

static void ipi_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_invalidate_dcache_range, &fd, 1);
}

static void ipi_flush_invalidate_dcache_range(void *arg)
{
	struct flush_data *fd = arg;
	__flush_invalidate_dcache_range(fd->addr1, fd->addr2);
}

static void system_flush_invalidate_dcache_range(unsigned long start,
		unsigned long size)
{
	struct flush_data fd = {
		.addr1 = start,
		.addr2 = size,
	};
	on_each_cpu(ipi_flush_invalidate_dcache_range, &fd, 1);
}