cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack.
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

cpuflags.c (2947B)


      1// SPDX-License-Identifier: GPL-2.0
      2#include <linux/types.h>
      3#include "bitops.h"
      4
      5#include <asm/processor-flags.h>
      6#include <asm/required-features.h>
      7#include <asm/msr-index.h>
      8#include "cpuflags.h"
      9
struct cpu_features cpu;	/* detected CPU features; filled in by get_cpuflags() */
u32 cpu_vendor[3];		/* CPUID vendor ID string, stored as EBX, EDX, ECX words */

static bool loaded_flags;	/* true once get_cpuflags() has run (it is a no-op after that) */
     14
/*
 * Probe for an x87 FPU: run fninit/fnstsw/fnstcw and check the resulting
 * status and control words. Returns nonzero if an FPU is present.
 *
 * CR0.EM and CR0.TS are cleared first so the x87 instructions do not
 * trap; note that the original CR0 value is not restored afterwards.
 */
static int has_fpu(void)
{
	/* Preload with all-ones: with no FPU the stores leave them unchanged. */
	u16 fcw = -1, fsw = -1;
	unsigned long cr0;

	asm volatile("mov %%cr0,%0" : "=r" (cr0));
	if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
		cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
		asm volatile("mov %0,%%cr0" : : "r" (cr0));
	}

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	/* After fninit a real FPU reports FSW == 0 and FCW low bits == 0x003f. */
	return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
     31
     32/*
     33 * For building the 16-bit code we want to explicitly specify 32-bit
     34 * push/pop operations, rather than just saying 'pushf' or 'popf' and
     35 * letting the compiler choose. But this is also included from the
     36 * compressed/ directory where it may be 64-bit code, and thus needs
     37 * to be 'pushfq' or 'popfq' in that case.
     38 */
     39#ifdef __x86_64__
     40#define PUSHF "pushfq"
     41#define POPF "popfq"
     42#else
     43#define PUSHF "pushfl"
     44#define POPF "popfl"
     45#endif
     46
/*
 * Test whether the EFLAGS bits in @mask are toggleable: read EFLAGS,
 * write it back with the masked bits flipped, re-read EFLAGS, and
 * compare. Returns nonzero if any masked bit actually changed.
 * The original EFLAGS value is restored before returning.
 *
 * Typically called with X86_EFLAGS_ID to detect CPUID support.
 */
int has_eflag(unsigned long mask)
{
	unsigned long f0, f1;

	asm volatile(PUSHF "	\n\t"	/* save original EFLAGS */
		     PUSHF "	\n\t"
		     "pop %0	\n\t"	/* f0 = current EFLAGS */
		     "mov %0,%1	\n\t"
		     "xor %2,%1	\n\t"	/* f1 = f0 with mask bits flipped */
		     "push %1	\n\t"
		     POPF "	\n\t"	/* try to install the flipped value */
		     PUSHF "	\n\t"
		     "pop %1	\n\t"	/* f1 = EFLAGS actually in effect */
		     POPF		/* restore the saved original EFLAGS */
		     : "=&r" (f0), "=&r" (f1)
		     : "ri" (mask));

	return !!((f0^f1) & mask);
}
     66
     67/* Handle x86_32 PIC using ebx. */
     68#if defined(__i386__) && defined(__PIC__)
     69# define EBX_REG "=r"
     70#else
     71# define EBX_REG "=b"
     72#endif
     73
/*
 * Execute CPUID with leaf @id in EAX and subleaf @count in ECX, storing
 * the resulting EAX/EBX/ECX/EDX into *a/*b/*c/*d respectively.
 *
 * The .ifnc/xchgl dance preserves %ebx when it cannot be used directly
 * as the output register (EBX_REG is "=r" for x86-32 PIC, where %ebx
 * holds the GOT pointer): the output goes through whatever register the
 * compiler picked and is swapped with %ebx around the cpuid itself.
 */
void cpuid_count(u32 id, u32 count, u32 *a, u32 *b, u32 *c, u32 *d)
{
	asm volatile(".ifnc %%ebx,%3 ; movl  %%ebx,%3 ; .endif	\n\t"
		     "cpuid					\n\t"
		     ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif	\n\t"
		    : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
		    : "a" (id), "c" (count)
	);
}
     83
     84#define cpuid(id, a, b, c, d) cpuid_count(id, 0, a, b, c, d)
     85
     86void get_cpuflags(void)
     87{
     88	u32 max_intel_level, max_amd_level;
     89	u32 tfms;
     90	u32 ignored;
     91
     92	if (loaded_flags)
     93		return;
     94	loaded_flags = true;
     95
     96	if (has_fpu())
     97		set_bit(X86_FEATURE_FPU, cpu.flags);
     98
     99	if (has_eflag(X86_EFLAGS_ID)) {
    100		cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2],
    101		      &cpu_vendor[1]);
    102
    103		if (max_intel_level >= 0x00000001 &&
    104		    max_intel_level <= 0x0000ffff) {
    105			cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
    106			      &cpu.flags[0]);
    107			cpu.level = (tfms >> 8) & 15;
    108			cpu.family = cpu.level;
    109			cpu.model = (tfms >> 4) & 15;
    110			if (cpu.level >= 6)
    111				cpu.model += ((tfms >> 16) & 0xf) << 4;
    112		}
    113
    114		if (max_intel_level >= 0x00000007) {
    115			cpuid_count(0x00000007, 0, &ignored, &ignored,
    116					&cpu.flags[16], &ignored);
    117		}
    118
    119		cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
    120		      &ignored);
    121
    122		if (max_amd_level >= 0x80000001 &&
    123		    max_amd_level <= 0x8000ffff) {
    124			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
    125			      &cpu.flags[1]);
    126		}
    127	}
    128}