cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

smp.c (7064B)


/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

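/*
 * Bring-up handshake state: the boot CPU fills these in (in __cpu_up()
 * and boot_secondary()) before sending IPI_WAKEUP; the woken CPU's
 * early startup code reads them to find its stack and release flag.
 */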
unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

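/* Release one secondary CPU: publish its number and kick it with IPI_WAKEUP. */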
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_init_cpus(void)
{
	struct device_node *cpu;
	u32 cpu_id;

	for_each_of_cpu_node(cpu) {
		cpu_id = of_get_cpu_hwid(cpu, 0);
		if (cpu_id < NR_CPUS)
			set_cpu_possible(cpu_id, true);
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu < max_cpus)
			set_cpu_present(cpu, true);
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

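/*
 * Called from the generic hotplug code to bring a secondary CPU online:
 * hand it the idle thread and the init_mm page tables, wake it via
 * boot_secondary() and wait up to one second for it to complete
 * cpu_running from secondary_start_kernel().
 */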
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing\n",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}

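/*
 * C entry point for a secondary CPU, reached from the architecture's
 * early assembly startup code once the CPU has been released.
 */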
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();
	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

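/* Demultiplex an incoming IPI and run the matching generic handler. */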
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

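/* Kick @cpu so it re-runs scheduling via scheduler_ipi(). */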
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

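/* Park every other online CPU; wait == 0, so do not block for completion. */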
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/* not supported, yet */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

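/*
 * Install the IPI-sending hook; the interrupt controller driver calls
 * this during its init so the functions above have a way to raise IPIs.
 */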
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - Performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}

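/*
 * Flush an mm on all CPUs in cmask, with a fast path that avoids the
 * cross-call when the local CPU is the only one in the mask.
 */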
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}

struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

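/*
 * Flush a virtual address range on all CPUs in cmask; ranges of at most
 * one page use the cheaper single-page flush, and the purely local case
 * skips the cross-call entirely.
 */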
static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
					  : cpu_online_mask;
	smp_flush_tlb_range(cmask, start, end);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);