cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cstate.c (6678B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005 Intel Corporation
 * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 * 	- Added _PDC for SMP C-states on Intel CPUs
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <acpi/processor.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>

/*
 * Initialize bm_flags based on the CPU cache properties.
 * On SMP it depends on the cache configuration:
 * - When the cache is not shared among all CPUs, we flush the cache
 *   before entering C3.
 * - When the cache is shared among all CPUs, we use the bm_check
 *   mechanism, as in the UP case.
 *
 * This routine is called only after all the CPUs are online.
 */
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
					unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

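	/*
	 * bm_check set means the idle path checks BM_STS for bus-master
	 * activity instead of flushing caches before entering C3;
	 * bm_control set means bus-master arbitration must be disabled
	 * (ARB_DIS) while in C3.
	 */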
	flags->bm_check = 0;
	if (num_online_cpus() == 1)
		flags->bm_check = 1;
	else if (c->x86_vendor == X86_VENDOR_INTEL) {
		/*
		 * Today all MP CPUs that support C3 share cache.
		 * And caches should not be flushed by software while
		 * entering C3 type state.
		 */
		flags->bm_check = 1;
	}

	/*
	 * On all recent Intel platforms, ARB_DISABLE is a nop.
	 * So, set bm_control to zero to indicate that ARB_DISABLE
	 * is not required while entering C3 type state on
	 * P4, Core and beyond CPUs
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
		flags->bm_control = 0;
	/*
	 * For all recent Centaur CPUs, the ucode will make sure that each
	 * core can keep cache coherence with each other while entering C3
	 * type state. So, set bm_check to 1 to indicate that the kernel
	 * doesn't need to execute a cache flush operation (WBINVD) when
	 * entering C3 type state.
	 */
	if (c->x86_vendor == X86_VENDOR_CENTAUR) {
		if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
		    c->x86_stepping >= 0x0e))
			flags->bm_check = 1;
	}

	if (c->x86_vendor == X86_VENDOR_ZHAOXIN) {
		/*
		 * All Zhaoxin CPUs that support C3 share cache.
		 * And caches should not be flushed by software while
		 * entering C3 type state.
		 */
		flags->bm_check = 1;
		/*
		 * On all recent Zhaoxin platforms, ARB_DISABLE is a nop.
		 * So, set bm_control to zero to indicate that ARB_DISABLE
		 * is not required while entering C3 type state.
		 */
		flags->bm_control = 0;
	}
	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17) {
		/*
		 * For all AMD Zen or newer CPUs that support C3, caches
		 * should not be flushed by software while entering C3
		 * type state. Set bm_check to 1 so that the kernel
		 * doesn't need to execute a cache flush operation.
		 */
		flags->bm_check = 1;
		/*
		 * In the current AMD C-state implementation ARB_DIS is no
		 * longer used. So set bm_control to zero to indicate ARB_DIS
		 * is not required while entering C3 type state.
		 */
		flags->bm_control = 0;
	}
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);

/* The code below handles C-state entry with the MONITOR/MWAIT pair on Intel. */

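/*
 * Per-CPU cache of the MWAIT hint (EAX) and extensions (ECX) used to
 * enter each _CST C-state; filled by acpi_processor_ffh_cstate_probe()
 * and consumed by acpi_processor_ffh_cstate_enter().
 */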
struct cstate_entry {
	struct {
		unsigned int eax;
		unsigned int ecx;
	} states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry __percpu *cpu_cstate_entry;	/* per CPU ptr */

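/* One entry per MWAIT C-state type, so the debug message below prints only once */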
static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];

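/*
 * In an FFH _CST entry, a GAS bit_offset (class code) of 2 marks a native
 * MWAIT C-state beyond HALT, per the "Intel Processor Vendor-Specific
 * ACPI Interface Specification" referenced below.
 */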
#define NATIVE_CSTATE_BEYOND_HALT	(2)

static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
{
	struct acpi_processor_cx *cx = _cx;
	long retval;
	unsigned int eax, ebx, ecx, edx;
	unsigned int edx_part;
	unsigned int cstate_type; /* C-state type and not ACPI C-state type */
	unsigned int num_cstate_subtype;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	/* Check whether this particular cx_type (from _CST) is supported or not */
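	/*
	 * CPUID.(EAX=5):EDX packs one 4-bit "number of sub C-states" field
	 * per MWAIT C-state: bits 3:0 for C0, bits 7:4 for C1, bits 11:8
	 * for C2, and so on.  cx->address is the MWAIT hint, with the
	 * target C-state in bits 7:4 (0 = C1), so e.g. hint 0x20 yields
	 * cstate_type 3 and selects EDX bits 15:12.
	 */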
	cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
			MWAIT_CSTATE_MASK) + 1;
	edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
	num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

	retval = 0;
	/* If the HW does not support any sub-states in this C-state */
	if (num_cstate_subtype == 0) {
		pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n",
				cx->address, edx_part);
		retval = -1;
		goto out;
	}

	/* The MWAIT ECX extensions and INTERRUPT_BREAK must be supported for C2/C3 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
		retval = -1;
		goto out;
	}

	if (!mwait_supported[cstate_type]) {
		mwait_supported[cstate_type] = 1;
		printk(KERN_DEBUG
			"Monitor-Mwait will be used to enter C-%d state\n",
			cx->type);
	}
	snprintf(cx->desc,
			ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
			cx->address);
out:
	return retval;
}

int acpi_processor_ffh_cstate_probe(unsigned int cpu,
		struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
	struct cstate_entry *percpu_entry;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	long retval;

	if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
		return -1;

	if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
		return -1;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
	percpu_entry->states[cx->index].eax = 0;
	percpu_entry->states[cx->index].ecx = 0;

	/* Make sure the probe runs on the right CPU: CPUID must be read there */
	retval = call_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx,
			     false);
	if (retval == 0) {
		/* Use the hint from _CST as the MWAIT EAX argument */
		percpu_entry->states[cx->index].eax = cx->address;
		percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
	}

	/*
	 * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
	 * then we should skip checking BM_STS for this C-state.
	 * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
	 */
	if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
		cx->bm_sts_skip = 1;

	return retval;
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);

void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
	unsigned int cpu = smp_processor_id();
	struct cstate_entry *percpu_entry;

	percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
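	/*
	 * Idle via MONITOR/MWAIT using the EAX hint and ECX extensions
	 * cached for this _CST index by acpi_processor_ffh_cstate_probe().
	 */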
	mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
			      percpu_entry->states[cx->index].ecx);
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);

static int __init ffh_cstate_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

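	/* FFH (MWAIT-based) C-state entry is implemented only for these vendors */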
	if (c->x86_vendor != X86_VENDOR_INTEL &&
	    c->x86_vendor != X86_VENDOR_AMD &&
	    c->x86_vendor != X86_VENDOR_HYGON)
		return -1;

	cpu_cstate_entry = alloc_percpu(struct cstate_entry);
	return 0;
}

static void __exit ffh_cstate_exit(void)
{
	free_percpu(cpu_cstate_entry);
	cpu_cstate_entry = NULL;
}

arch_initcall(ffh_cstate_init);
__exitcall(ffh_cstate_exit);