cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

idle.c (5975B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MIPS idle loop and WAIT instruction support.
 *
 * Copyright (C) xxxx  the Anonymous
 * Copyright (C) 1994 - 2006 Ralf Baechle
 * Copyright (C) 2003, 2004  Maciej W. Rozycki
 * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
 */
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/cpu-type.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>

/*
 * Not all MIPS CPUs have the "wait" instruction available. Moreover, the
 * implementation of the "wait" feature differs between CPU families. This
 * pointer selects the function that implements the CPU-specific wait.
 * The wait instruction stops the pipeline and greatly reduces the power
 * consumption of the CPU.
 */
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
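
/*
 * Example (an illustration only, not part of this file): because cpu_wait is
 * a plain exported function pointer, platform code can install its own
 * low-power routine before the idle loop starts. The helper name below is
 * hypothetical.
 *
 *	static void my_soc_wait(void)
 *	{
 *		... enter a SoC-specific low-power state ...
 *		raw_local_irq_enable();
 *	}
 *
 *	cpu_wait = my_soc_wait;
 */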

static void __cpuidle r3081_wait(void)
{
	unsigned long cfg = read_c0_conf();
	write_c0_conf(cfg | R30XX_CONF_HALT);
	raw_local_irq_enable();
}

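/*
 * Generic R4000-class wait: interrupts are re-enabled first, then the WAIT
 * sequence itself runs in __r4k_wait(), which is provided elsewhere in the
 * kernel rather than in this file.
 */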
void __cpuidle r4k_wait(void)
{
	raw_local_irq_enable();
	__r4k_wait();
}

/*
 * This variant is preferable as it allows testing need_resched and going to
 * sleep depending on the outcome atomically.  Unfortunately the "It is
 * implementation-dependent whether the pipeline restarts when a non-enabled
 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
 * using this version a gamble.
 */
void __cpuidle r4k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push		\n"
		"	.set	arch=r4000	\n"
		"	wait			\n"
		"	.set	pop		\n");
	raw_local_irq_enable();
}

/*
 * The RM7000 variant has to handle erratum 38. The workaround is to not
 * have any pending stores when the WAIT instruction is executed.
 */
static void __cpuidle rm7k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"	.set	noat					\n"
		"	mfc0	$1, $12					\n"
		"	sync						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	wait						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	.set	pop					\n");
	raw_local_irq_enable();
}

/*
 * Au1 'wait' is only useful when the 32kHz counter is used as the timer,
 * since the core clock (and the cp0 counter) stops upon executing it. Only an
 * interrupt can wake it, so interrupts must be enabled before entering idle
 * modes.
 */
static void __cpuidle au1k_wait(void)
{
	unsigned long c0status = read_c0_status() | 1;	/* irqs on */

	__asm__(
	"	.set	push			\n"
	"	.set	arch=r4000		\n"
	"	cache	0x14, 0(%0)		\n"
	"	cache	0x14, 32(%0)		\n"
	"	sync				\n"
	"	mtc0	%1, $12			\n" /* wr c0status */
	"	wait				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	.set	pop			\n"
	: : "r" (au1k_wait), "r" (c0status));
}

static int __initdata nowait;

static int __init wait_disable(char *s)
{
	nowait = 1;

	return 1;
}

__setup("nowait", wait_disable);
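
/*
 * Example (illustration only): passing "nowait" on the kernel command line,
 * e.g.
 *
 *	console=ttyS0 nowait
 *
 * makes wait_disable() set nowait, so check_wait() below leaves cpu_wait
 * NULL and arch_cpu_idle() falls back to plain raw_local_irq_enable().
 */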

void __init check_wait(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	if (nowait) {
		printk("Wait instruction disabled.\n");
		return;
	}

	/*
	 * MIPSr6 specifies that masked interrupts should unblock an executing
	 * wait instruction, and thus that it is safe for us to use
	 * r4k_wait_irqoff. Yippee!
	 */
	if (cpu_has_mips_r6) {
		cpu_wait = r4k_wait_irqoff;
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R3081:
	case CPU_R3081E:
		cpu_wait = r3081_wait;
		break;
	case CPU_R4200:
/*	case CPU_R4300: */
	case CPU_R4600:
	case CPU_R4640:
	case CPU_R4650:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5500:
	case CPU_NEVADA:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_4KSC:
	case CPU_5KC:
	case CPU_5KE:
	case CPU_25KF:
	case CPU_PR4450:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
	case CPU_CAVIUM_OCTEON3:
	case CPU_XBURST:
	case CPU_LOONGSON32:
		cpu_wait = r4k_wait;
		break;
	case CPU_LOONGSON64:
		if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
				(PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
				(c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
			cpu_wait = r4k_wait;
		break;

	case CPU_BMIPS5000:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_RM7000:
		cpu_wait = rm7k_wait_irqoff;
		break;

	case CPU_PROAPTIV:
	case CPU_P5600:
		/*
		 * Incoming Fast Debug Channel (FDC) data during a wait
		 * instruction causes the wait never to resume, even if an
		 * interrupt is received. Avoid using wait at all if FDC data is
		 * likely to be received.
		 */
		if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
			break;
		fallthrough;
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_24K:
	case CPU_34K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_M5150:
	case CPU_QEMU_GENERIC:
		cpu_wait = r4k_wait;
		if (read_c0_config7() & MIPS_CONF7_WII)
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_74K:
		cpu_wait = r4k_wait;
		if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
			cpu_wait = r4k_wait_irqoff;
		break;

	case CPU_TX49XX:
		cpu_wait = r4k_wait_irqoff;
		break;
	case CPU_ALCHEMY:
		cpu_wait = au1k_wait;
		break;
	case CPU_20KC:
		/*
		 * WAIT on Rev1.0 has errata E1, E2, E3 and E16.
		 * WAIT on Rev2.0 and Rev3.0 has E16.
		 * Rev3.1 WAIT is a nop, so there is no point in using it.
		 */
		if ((c->processor_id & 0xff) <= 0x64)
			break;

		/*
		 * Another rev is incrementing c0_count at a reduced clock
		 * rate while in WAIT mode.  So we basically have the choice
		 * between using the cp0 timer as clocksource or avoiding
		 * the WAIT instruction.  Until more details are known,
		 * disable the use of WAIT for 20Kc entirely.
		   cpu_wait = r4k_wait;
		 */
		break;
	default:
		break;
	}
}

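/*
 * Idle entry point called from the generic idle loop: run the wait routine
 * selected by check_wait(), or simply re-enable interrupts if none was
 * chosen.
 */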
void arch_cpu_idle(void)
{
	if (cpu_wait)
		cpu_wait();
	else
		raw_local_irq_enable();
}

#ifdef CONFIG_CPU_IDLE

int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	arch_cpu_idle();
	return index;
}

#endif
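
/*
 * Usage sketch (an assumption for illustration, not part of this file): a
 * cpuidle driver could plug mips_cpuidle_wait_enter() into one of its idle
 * states. The driver name and latency numbers below are hypothetical
 * placeholders.
 *
 *	static struct cpuidle_driver example_idle_driver = {
 *		.name		= "example_idle",
 *		.owner		= THIS_MODULE,
 *		.states		= {
 *			{
 *				.enter			= mips_cpuidle_wait_enter,
 *				.exit_latency		= 1,
 *				.target_residency	= 1,
 *				.name			= "wait",
 *				.desc			= "MIPS wait",
 *			},
 *		},
 *		.state_count	= 1,
 *	};
 *
 * Registering this with cpuidle_register(&example_idle_driver, NULL) would
 * let the cpuidle governor enter the state, which in turn calls
 * arch_cpu_idle() above.
 */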