cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

generic.c (23940B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * This only handles 32-bit MTRRs on 32-bit hosts. This is strictly wrong
      4 * because MTRRs can span up to 40 bits (36 bits on most modern x86).
      5 */
      6
      7#include <linux/export.h>
      8#include <linux/init.h>
      9#include <linux/io.h>
     10#include <linux/mm.h>
     11
     12#include <asm/processor-flags.h>
     13#include <asm/cpufeature.h>
     14#include <asm/tlbflush.h>
     15#include <asm/mtrr.h>
     16#include <asm/msr.h>
     17#include <asm/memtype.h>
     18
     19#include "mtrr.h"
     20
     21struct fixed_range_block {
     22	int base_msr;		/* start address of an MTRR block */
     23	int ranges;		/* number of MTRRs in this block  */
     24};
     25
     26static struct fixed_range_block fixed_range_blocks[] = {
     27	{ MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
     28	{ MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
     29	{ MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
     30	{}
     31};
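       /*
        * Layout note: the three fixed-range blocks above together cover the
        * first 1 MiB of physical memory:
        *
        *   MSR_MTRRfix64K_00000 (1 MSR)  -> 0x00000-0x7FFFF,  8 x 64 KiB ranges
        *   MSR_MTRRfix16K_80000 (2 MSRs) -> 0x80000-0xBFFFF, 16 x 16 KiB ranges
        *   MSR_MTRRfix4K_C0000  (8 MSRs) -> 0xC0000-0xFFFFF, 64 x 4 KiB ranges
        *
        * Each MSR packs eight one-byte memory types, so the eleven MSRs back
        * the 88-entry mtrr_state.fixed_ranges[] array used throughout this file.
        */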
     32
     33static unsigned long smp_changes_mask;
     34static int mtrr_state_set;
     35u64 mtrr_tom2;
     36
     37struct mtrr_state_type mtrr_state;
     38EXPORT_SYMBOL_GPL(mtrr_state);
     39
     40/*
     41 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
     42 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
     43 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
     44 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
     45 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
     46 * 0 for operation."
     47 */
     48static inline void k8_check_syscfg_dram_mod_en(void)
     49{
     50	u32 lo, hi;
     51
     52	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
     53	      (boot_cpu_data.x86 >= 0x0f)))
     54		return;
     55
     56	rdmsr(MSR_AMD64_SYSCFG, lo, hi);
     57	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
     58		pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
     59		       " not cleared by BIOS, clearing this bit\n",
     60		       smp_processor_id());
     61		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
     62		mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
     63	}
     64}
     65
     66/* Get the size of a contiguous MTRR range */
     67static u64 get_mtrr_size(u64 mask)
     68{
     69	u64 size;
     70
     71	mask >>= PAGE_SHIFT;
     72	mask |= size_or_mask;
     73	size = -mask;
     74	size <<= PAGE_SHIFT;
     75	return size;
     76}
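       /*
        * Worked example: a 64 MiB MTRR on a CPU with a 36-bit physical address
        * width hands this function mask = 0xFFC000000 (PHYSMASK bits 35:12,
        * valid bit already stripped by the caller).  mask >> PAGE_SHIFT is
        * 0xFFC000; OR-ing in size_or_mask sets every bit above the address
        * width, so -mask is 0x4000 pages, and shifting back by PAGE_SHIFT
        * gives 0x4000000 bytes = 64 MiB.
        */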
     77
     78/*
     79 * Check and return the effective type for MTRR-MTRR type overlap.
     80 * Returns 1 if the effective type is UNCACHEABLE, else returns 0
     81 */
     82static int check_type_overlap(u8 *prev, u8 *curr)
     83{
     84	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
     85		*prev = MTRR_TYPE_UNCACHABLE;
     86		*curr = MTRR_TYPE_UNCACHABLE;
     87		return 1;
     88	}
     89
     90	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
     91	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
     92		*prev = MTRR_TYPE_WRTHROUGH;
     93		*curr = MTRR_TYPE_WRTHROUGH;
     94	}
     95
     96	if (*prev != *curr) {
     97		*prev = MTRR_TYPE_UNCACHABLE;
     98		*curr = MTRR_TYPE_UNCACHABLE;
     99		return 1;
    100	}
    101
    102	return 0;
    103}
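       /*
        * Example outcomes of check_type_overlap(), matching the architectural
        * precedence rules for overlapping variable MTRRs:
        *
        *   UC + anything  -> UC (uncacheable always wins)
        *   WB + WT        -> WT (write-through wins over write-back)
        *   any other mix  -> UC (undefined by the architecture, degrade safely)
        *
        * The return value of 1 lets callers stop looking, since nothing can
        * override UNCACHABLE once it has been selected.
        */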
    104
    105/**
    106 * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
    107 *
    108 * Return the MTRR fixed memory type of 'start'.
    109 *
    110 * MTRR fixed entries are laid out as follows:
    111 *  0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
    112 *  0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
    113 *  0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
    114 *
    115 * Return Values:
    116 * MTRR_TYPE_(type)  - Matched memory type
    117 * MTRR_TYPE_INVALID - Unmatched
    118 */
    119static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
    120{
    121	int idx;
    122
    123	if (start >= 0x100000)
    124		return MTRR_TYPE_INVALID;
    125
    126	/* 0x0 - 0x7FFFF */
    127	if (start < 0x80000) {
    128		idx = 0;
    129		idx += (start >> 16);
    130		return mtrr_state.fixed_ranges[idx];
    131	/* 0x80000 - 0xBFFFF */
    132	} else if (start < 0xC0000) {
    133		idx = 1 * 8;
    134		idx += ((start - 0x80000) >> 14);
    135		return mtrr_state.fixed_ranges[idx];
    136	}
    137
    138	/* 0xC0000 - 0xFFFFF */
    139	idx = 3 * 8;
    140	idx += ((start - 0xC0000) >> 12);
    141	return mtrr_state.fixed_ranges[idx];
    142}
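       /*
        * Worked example: a lookup for start = 0xA5000 hits the 16 KiB block,
        * so idx = 8 + ((0xA5000 - 0x80000) >> 14) = 8 + 9 = 17 and the type in
        * mtrr_state.fixed_ranges[17] is returned.  Likewise start = 0xCB000
        * hits the 4 KiB block: idx = 24 + ((0xCB000 - 0xC0000) >> 12) = 35.
        */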
    143
    144/**
    145 * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
    146 *
    147 * Return Value:
    148 * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
    149 *
    150 * Output Arguments:
    151 * repeat - Set to 1 when [start:end] spanned across MTRR range and type
    152 *	    returned corresponds only to [start:*partial_end].  Caller has
    153 *	    to lookup again for [*partial_end:end].
    154 *
    155 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
    156 *	     region is fully covered by a single MTRR entry or the default
    157 *	     type.
    158 */
    159static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
    160				    int *repeat, u8 *uniform)
    161{
    162	int i;
    163	u64 base, mask;
    164	u8 prev_match, curr_match;
    165
    166	*repeat = 0;
    167	*uniform = 1;
    168
    169	prev_match = MTRR_TYPE_INVALID;
    170	for (i = 0; i < num_var_ranges; ++i) {
    171		unsigned short start_state, end_state, inclusive;
    172
    173		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
    174			continue;
    175
    176		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
    177		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
    178		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
    179		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);
    180
    181		start_state = ((start & mask) == (base & mask));
    182		end_state = ((end & mask) == (base & mask));
    183		inclusive = ((start < base) && (end > base));
    184
    185		if ((start_state != end_state) || inclusive) {
    186			/*
    187			 * We have start:end spanning across an MTRR.
    188			 * We split the region into either
    189			 *
    190			 * - start_state:1
    191			 * (start:mtrr_end)(mtrr_end:end)
    192			 * - end_state:1
    193			 * (start:mtrr_start)(mtrr_start:end)
    194			 * - inclusive:1
    195			 * (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end)
    196			 *
    197			 * depending on kind of overlap.
    198			 *
    199			 * Return the type of the first region and a pointer
    200			 * to the start of next region so that caller will be
    201			 * advised to lookup again after having adjusted start
    202			 * and end.
    203			 *
    204			 * Note: This way we handle overlaps with multiple
    205			 * entries and the default type properly.
    206			 */
    207			if (start_state)
    208				*partial_end = base + get_mtrr_size(mask);
    209			else
    210				*partial_end = base;
    211
    212			if (unlikely(*partial_end <= start)) {
    213				WARN_ON(1);
    214				*partial_end = start + PAGE_SIZE;
    215			}
    216
    217			end = *partial_end - 1; /* end is inclusive */
    218			*repeat = 1;
    219			*uniform = 0;
    220		}
    221
    222		if ((start & mask) != (base & mask))
    223			continue;
    224
    225		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
    226		if (prev_match == MTRR_TYPE_INVALID) {
    227			prev_match = curr_match;
    228			continue;
    229		}
    230
    231		*uniform = 0;
    232		if (check_type_overlap(&prev_match, &curr_match))
    233			return curr_match;
    234	}
    235
    236	if (prev_match != MTRR_TYPE_INVALID)
    237		return prev_match;
    238
    239	return mtrr_state.def_type;
    240}
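       /*
        * Worked example of the match test above: a variable MTRR describing a
        * 256 MiB write-combining aperture at 0xC0000000 has base = 0xC0000000
        * and (with a 36-bit address width) mask = 0xFF0000000.  An access to
        * 0xC8000000 matches because 0xC8000000 & mask == base & mask, while
        * 0xD0000000 does not.  The start_state/end_state/inclusive checks then
        * detect lookups that only partially overlap such a range and report
        * the split back to the caller through *partial_end and *repeat.
        */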
    241
    242/**
    243 * mtrr_type_lookup - look up memory type in MTRR
    244 *
    245 * Return Values:
    246 * MTRR_TYPE_(type)  - The effective MTRR type for the region
    247 * MTRR_TYPE_INVALID - MTRR is disabled
    248 *
    249 * Output Argument:
    250 * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
    251 *	     region is fully covered by a single MTRR entry or the default
    252 *	     type.
    253 */
    254u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
    255{
    256	u8 type, prev_type, is_uniform = 1, dummy;
    257	int repeat;
    258	u64 partial_end;
    259
    260	/* Make end inclusive instead of exclusive */
    261	end--;
    262
    263	if (!mtrr_state_set)
    264		return MTRR_TYPE_INVALID;
    265
    266	if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
    267		return MTRR_TYPE_INVALID;
    268
    269	/*
    270	 * Look up the fixed ranges first, which take priority over
    271	 * the variable ranges.
    272	 */
    273	if ((start < 0x100000) &&
    274	    (mtrr_state.have_fixed) &&
    275	    (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
    276		is_uniform = 0;
    277		type = mtrr_type_lookup_fixed(start, end);
    278		goto out;
    279	}
    280
    281	/*
    282	 * Look up the variable ranges.  Look for multiple ranges matching
    283	 * this address and pick type as per MTRR precedence.
    284	 */
    285	type = mtrr_type_lookup_variable(start, end, &partial_end,
    286					 &repeat, &is_uniform);
    287
    288	/*
    289	 * Common path is with repeat = 0.
    290	 * However, we can have cases where [start:end] spans across some
    291	 * MTRR ranges and/or the default type.  Do repeated lookups for
    292	 * that case here.
    293	 */
    294	while (repeat) {
    295		prev_type = type;
    296		start = partial_end;
    297		is_uniform = 0;
    298		type = mtrr_type_lookup_variable(start, end, &partial_end,
    299						 &repeat, &dummy);
    300
    301		if (check_type_overlap(&prev_type, &type))
    302			goto out;
    303	}
    304
    305	if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2))
    306		type = MTRR_TYPE_WRBACK;
    307
    308out:
    309	*uniform = is_uniform;
    310	return type;
    311}
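       /*
        * Typical use (sketch only, the callers live outside this file): code
        * such as the PAT/huge-page mapping paths does roughly
        *
        *	u8 uniform;
        *	u8 type = mtrr_type_lookup(start, end, &uniform);
        *
        * and may only map the range with a large page when 'uniform' is set,
        * since otherwise parts of the range carry different memory types.
        */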
    312
    313/* Get the MSR pair relating to a var range */
    314static void
    315get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
    316{
    317	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
    318	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
    319}
    320
    321/* Fill the MSR pair relating to a var range */
    322void fill_mtrr_var_range(unsigned int index,
    323		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
    324{
    325	struct mtrr_var_range *vr;
    326
    327	vr = mtrr_state.var_ranges;
    328
    329	vr[index].base_lo = base_lo;
    330	vr[index].base_hi = base_hi;
    331	vr[index].mask_lo = mask_lo;
    332	vr[index].mask_hi = mask_hi;
    333}
    334
    335static void get_fixed_ranges(mtrr_type *frs)
    336{
    337	unsigned int *p = (unsigned int *)frs;
    338	int i;
    339
    340	k8_check_syscfg_dram_mod_en();
    341
    342	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);
    343
    344	for (i = 0; i < 2; i++)
    345		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
    346	for (i = 0; i < 8; i++)
    347		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
    348}
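       /*
        * Indexing note: 'p' views fixed_ranges as an array of u32s.  The
        * 64 KiB MSR fills p[0..1], the two 16 KiB MSRs fill p[2..5], and the
        * eight 4 KiB MSRs fill p[6..21], for 22 u32s in total, i.e. the full
        * 88-byte fixed-range table.
        */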
    349
    350void mtrr_save_fixed_ranges(void *info)
    351{
    352	if (boot_cpu_has(X86_FEATURE_MTRR))
    353		get_fixed_ranges(mtrr_state.fixed_ranges);
    354}
    355
    356static unsigned __initdata last_fixed_start;
    357static unsigned __initdata last_fixed_end;
    358static mtrr_type __initdata last_fixed_type;
    359
    360static void __init print_fixed_last(void)
    361{
    362	if (!last_fixed_end)
    363		return;
    364
    365	pr_debug("  %05X-%05X %s\n", last_fixed_start,
    366		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
    367
    368	last_fixed_end = 0;
    369}
    370
    371static void __init update_fixed_last(unsigned base, unsigned end,
    372				     mtrr_type type)
    373{
    374	last_fixed_start = base;
    375	last_fixed_end = end;
    376	last_fixed_type = type;
    377}
    378
    379static void __init
    380print_fixed(unsigned base, unsigned step, const mtrr_type *types)
    381{
    382	unsigned i;
    383
    384	for (i = 0; i < 8; ++i, ++types, base += step) {
    385		if (last_fixed_end == 0) {
    386			update_fixed_last(base, base + step, *types);
    387			continue;
    388		}
    389		if (last_fixed_end == base && last_fixed_type == *types) {
    390			last_fixed_end = base + step;
    391			continue;
    392		}
    393		/* new segments: gap or different type */
    394		print_fixed_last();
    395		update_fixed_last(base, base + step, *types);
    396	}
    397}
    398
    399static void prepare_set(void);
    400static void post_set(void);
    401
    402static void __init print_mtrr_state(void)
    403{
    404	unsigned int i;
    405	int high_width;
    406
    407	pr_debug("MTRR default type: %s\n",
    408		 mtrr_attrib_to_str(mtrr_state.def_type));
    409	if (mtrr_state.have_fixed) {
    410		pr_debug("MTRR fixed ranges %sabled:\n",
    411			((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
    412			 (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
    413			 "en" : "dis");
    414		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
    415		for (i = 0; i < 2; ++i)
    416			print_fixed(0x80000 + i * 0x20000, 0x04000,
    417				    mtrr_state.fixed_ranges + (i + 1) * 8);
    418		for (i = 0; i < 8; ++i)
    419			print_fixed(0xC0000 + i * 0x08000, 0x01000,
    420				    mtrr_state.fixed_ranges + (i + 3) * 8);
    421
    422		/* tail */
    423		print_fixed_last();
    424	}
    425	pr_debug("MTRR variable ranges %sabled:\n",
    426		 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
    427	high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;
    428
    429	for (i = 0; i < num_var_ranges; ++i) {
    430		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
    431			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
    432				 i,
    433				 high_width,
    434				 mtrr_state.var_ranges[i].base_hi,
    435				 mtrr_state.var_ranges[i].base_lo >> 12,
    436				 high_width,
    437				 mtrr_state.var_ranges[i].mask_hi,
    438				 mtrr_state.var_ranges[i].mask_lo >> 12,
    439				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
    440		else
    441			pr_debug("  %u disabled\n", i);
    442	}
    443	if (mtrr_tom2)
    444		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
    445}
    446
    447/* PAT setup for BP. We need to go through sync steps here */
    448void __init mtrr_bp_pat_init(void)
    449{
    450	unsigned long flags;
    451
    452	local_irq_save(flags);
    453	prepare_set();
    454
    455	pat_init();
    456
    457	post_set();
    458	local_irq_restore(flags);
    459}
    460
    461/* Grab all of the MTRR state for this CPU into *state */
    462bool __init get_mtrr_state(void)
    463{
    464	struct mtrr_var_range *vrs;
    465	unsigned lo, dummy;
    466	unsigned int i;
    467
    468	vrs = mtrr_state.var_ranges;
    469
    470	rdmsr(MSR_MTRRcap, lo, dummy);
    471	mtrr_state.have_fixed = (lo >> 8) & 1;
    472
    473	for (i = 0; i < num_var_ranges; i++)
    474		get_mtrr_var_range(i, &vrs[i]);
    475	if (mtrr_state.have_fixed)
    476		get_fixed_ranges(mtrr_state.fixed_ranges);
    477
    478	rdmsr(MSR_MTRRdefType, lo, dummy);
    479	mtrr_state.def_type = (lo & 0xff);
    480	mtrr_state.enabled = (lo & 0xc00) >> 10;
    481
    482	if (amd_special_default_mtrr()) {
    483		unsigned low, high;
    484
    485		/* TOP_MEM2 */
    486		rdmsr(MSR_K8_TOP_MEM2, low, high);
    487		mtrr_tom2 = high;
    488		mtrr_tom2 <<= 32;
    489		mtrr_tom2 |= low;
    490		mtrr_tom2 &= 0xffffff800000ULL;
    491	}
    492
    493	print_mtrr_state();
    494
    495	mtrr_state_set = 1;
    496
    497	return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
    498}
    499
    500/* Some BIOSes are messed up and don't set all MTRRs the same! */
    501void __init mtrr_state_warn(void)
    502{
    503	unsigned long mask = smp_changes_mask;
    504
    505	if (!mask)
    506		return;
    507	if (mask & MTRR_CHANGE_MASK_FIXED)
    508		pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
    509	if (mask & MTRR_CHANGE_MASK_VARIABLE)
    510		pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
    511	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
    512		pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
    513
    514	pr_info("mtrr: probably your BIOS does not set up all CPUs.\n");
    515	pr_info("mtrr: corrected configuration.\n");
    516}
    517
    518/*
    519 * Doesn't attempt to pass an error out to MTRR users
    520 * because it's quite complicated in some cases and probably not
    521 * worth it because the best error handling is to ignore it.
    522 */
    523void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
    524{
    525	if (wrmsr_safe(msr, a, b) < 0) {
    526		pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
    527			smp_processor_id(), msr, a, b);
    528	}
    529}
    530
    531/**
    532 * set_fixed_range - checks & updates a fixed-range MTRR if it
    533 *		     differs from the value it should have
    534 * @msr: MSR address of the MTRR which should be checked and updated
    535 * @changed: pointer which indicates whether the MTRR needed to be changed
    536 * @msrwords: pointer to the MSR values which the MSR should have
    537 */
    538static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
    539{
    540	unsigned lo, hi;
    541
    542	rdmsr(msr, lo, hi);
    543
    544	if (lo != msrwords[0] || hi != msrwords[1]) {
    545		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
    546		*changed = true;
    547	}
    548}
    549
    550/**
    551 * generic_get_free_region - Get a free MTRR.
    552 * @base: The starting (base) address of the region.
    553 * @size: The size (in bytes) of the region.
    554 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
    555 *
    556 * Returns: The index of the region on success, else negative on error.
    557 */
    558int
    559generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
    560{
    561	unsigned long lbase, lsize;
    562	mtrr_type ltype;
    563	int i, max;
    564
    565	max = num_var_ranges;
    566	if (replace_reg >= 0 && replace_reg < max)
    567		return replace_reg;
    568
    569	for (i = 0; i < max; ++i) {
    570		mtrr_if->get(i, &lbase, &lsize, &ltype);
    571		if (lsize == 0)
    572			return i;
    573	}
    574
    575	return -ENOSPC;
    576}
    577
    578static void generic_get_mtrr(unsigned int reg, unsigned long *base,
    579			     unsigned long *size, mtrr_type *type)
    580{
    581	u32 mask_lo, mask_hi, base_lo, base_hi;
    582	unsigned int hi;
    583	u64 tmp, mask;
    584
    585	/*
    586	 * get_mtrr doesn't need to update mtrr_state; it can also be called
    587	 * from any CPU, so the warning below is printed directly.
    588	 */
    589	get_cpu();
    590
    591	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
    592
    593	if ((mask_lo & 0x800) == 0) {
    594		/*  Invalid (i.e. free) range */
    595		*base = 0;
    596		*size = 0;
    597		*type = 0;
    598		goto out_put_cpu;
    599	}
    600
    601	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
    602
    603	/* Work out the shifted address mask: */
    604	tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
    605	mask = size_or_mask | tmp;
    606
    607	/* Expand tmp with high bits to all 1s: */
    608	hi = fls64(tmp);
    609	if (hi > 0) {
    610		tmp |= ~((1ULL<<(hi - 1)) - 1);
    611
    612		if (tmp != mask) {
    613			pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
    614			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
    615			mask = tmp;
    616		}
    617	}
    618
    619	/*
    620	 * This works correctly if size is a power of two, i.e. a
    621	 * contiguous range:
    622	 */
    623	*size = -mask;
    624	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
    625	*type = base_lo & 0xff;
    626
    627out_put_cpu:
    628	put_cpu();
    629}
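       /*
        * Decode example: for a 64 MiB region on a CPU with a 36-bit address
        * width, the mask MSR reads back as mask_hi:mask_lo = 0xF:0xFC000800,
        * so tmp = (0xFULL << 20) | (0xFC000800 >> 12) = 0xFFC000 and, after
        * OR-ing in size_or_mask, *size = -mask = 0x4000 pages (64 MiB).
        * Note that *base and *size are returned in pages, not bytes.
        */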
    630
    631/**
    632 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
    633 *		      differ from the saved set
    634 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
    635 */
    636static int set_fixed_ranges(mtrr_type *frs)
    637{
    638	unsigned long long *saved = (unsigned long long *)frs;
    639	bool changed = false;
    640	int block = -1, range;
    641
    642	k8_check_syscfg_dram_mod_en();
    643
    644	while (fixed_range_blocks[++block].ranges) {
    645		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
    646			set_fixed_range(fixed_range_blocks[block].base_msr + range,
    647					&changed, (unsigned int *)saved++);
    648	}
    649
    650	return changed;
    651}
    652
    653/*
    654 * Set the MSR pair relating to a var range.
    655 * Returns true if changes are made.
    656 */
    657static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
    658{
    659	unsigned int lo, hi;
    660	bool changed = false;
    661
    662	rdmsr(MTRRphysBase_MSR(index), lo, hi);
    663	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
    664	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
    665		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
    666
    667		mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
    668		changed = true;
    669	}
    670
    671	rdmsr(MTRRphysMask_MSR(index), lo, hi);
    672
    673	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
    674	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
    675		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
    676		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
    677		changed = true;
    678	}
    679	return changed;
    680}
    681
    682static u32 deftype_lo, deftype_hi;
    683
    684/**
    685 * set_mtrr_state - Set the MTRR state for this CPU.
    686 *
    687 * NOTE: The CPU must already be in a safe state for MTRR changes.
    688 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
    689 */
    690static unsigned long set_mtrr_state(void)
    691{
    692	unsigned long change_mask = 0;
    693	unsigned int i;
    694
    695	for (i = 0; i < num_var_ranges; i++) {
    696		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
    697			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
    698	}
    699
    700	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
    701		change_mask |= MTRR_CHANGE_MASK_FIXED;
    702
    703	/*
    704	 * Set_mtrr_restore restores the old value of MTRRdefType,
    705	 * so to set it we fiddle with the saved value:
    706	 */
    707	if ((deftype_lo & 0xff) != mtrr_state.def_type
    708	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
    709
    710		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
    711			     (mtrr_state.enabled << 10);
    712		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
    713	}
    714
    715	return change_mask;
    716}
    717
    718
    719static unsigned long cr4;
    720static DEFINE_RAW_SPINLOCK(set_atomicity_lock);
    721
    722/*
    723 * Since we are disabling the cache don't allow any interrupts,
    724 * they would run extremely slow and would only increase the pain.
    725 *
    726 * The caller must ensure that local interrupts are disabled and
    727 * are reenabled after post_set() has been called.
    728 */
    729static void prepare_set(void) __acquires(set_atomicity_lock)
    730{
    731	unsigned long cr0;
    732
    733	/*
    734	 * Note that this is not ideal
    735	 * since the cache is only flushed/disabled for this CPU while the
    736	 * MTRRs are changed, but changing this requires more invasive
    737	 * changes to the way the kernel boots
    738	 */
    739
    740	raw_spin_lock(&set_atomicity_lock);
    741
    742	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
    743	cr0 = read_cr0() | X86_CR0_CD;
    744	write_cr0(cr0);
    745
    746	/*
    747	 * Cache flushing is the most time-consuming step when programming
    748	 * the MTRRs. Fortunately, as per the Intel Software Development
    749	 * Manual, we can skip it if the processor supports cache self-
    750	 * snooping.
    751	 */
    752	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
    753		wbinvd();
    754
    755	/* Save value of CR4 and clear Page Global Enable (bit 7) */
    756	if (boot_cpu_has(X86_FEATURE_PGE)) {
    757		cr4 = __read_cr4();
    758		__write_cr4(cr4 & ~X86_CR4_PGE);
    759	}
    760
    761	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
    762	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
    763	flush_tlb_local();
    764
    765	/* Save MTRR state */
    766	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
    767
    768	/* Disable MTRRs, and set the default type to uncached */
    769	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
    770
    771	/* Again, only flush caches if we have to. */
    772	if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
    773		wbinvd();
    774}
    775
    776static void post_set(void) __releases(set_atomicity_lock)
    777{
    778	/* Flush TLBs (no need to flush caches - they are disabled) */
    779	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
    780	flush_tlb_local();
    781
    782	/* Intel (P6) standard MTRRs */
    783	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
    784
    785	/* Enable caches */
    786	write_cr0(read_cr0() & ~X86_CR0_CD);
    787
    788	/* Restore value of CR4 */
    789	if (boot_cpu_has(X86_FEATURE_PGE))
    790		__write_cr4(cr4);
    791	raw_spin_unlock(&set_atomicity_lock);
    792}
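       /*
        * prepare_set() and post_set() bracket the documented safe MTRR update
        * sequence: with interrupts off, enter no-fill cache mode (CR0.CD=1),
        * flush caches unless the CPU self-snoops, clear CR4.PGE and flush the
        * TLB, save MTRRdefType and disable the MTRRs with an uncached default
        * type.  After the caller has written the new settings, post_set()
        * flushes the TLB again, restores MTRRdefType (re-enabling the MTRRs),
        * re-enables caches and restores CR4.  generic_set_all() and
        * generic_set_mtrr() below wrap their MSR writes in exactly this pair.
        */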
    793
    794static void generic_set_all(void)
    795{
    796	unsigned long mask, count;
    797	unsigned long flags;
    798
    799	local_irq_save(flags);
    800	prepare_set();
    801
    802	/* Actually set the state */
    803	mask = set_mtrr_state();
    804
    805	/* also set PAT */
    806	pat_init();
    807
    808	post_set();
    809	local_irq_restore(flags);
    810
    811	/* Use the atomic bitops to update the global mask */
    812	for (count = 0; count < sizeof(mask) * 8; ++count) {
    813		if (mask & 0x01)
    814			set_bit(count, &smp_changes_mask);
    815		mask >>= 1;
    816	}
    817
    818}
    819
    820/**
    821 * generic_set_mtrr - set variable MTRR register on the local CPU.
    822 *
    823 * @reg: The register to set.
    824 * @base: The base address of the region.
    825 * @size: The size of the region. If this is 0 the region is disabled.
    826 * @type: The type of the region.
    827 *
    828 * Returns nothing.
    829 */
    830static void generic_set_mtrr(unsigned int reg, unsigned long base,
    831			     unsigned long size, mtrr_type type)
    832{
    833	unsigned long flags;
    834	struct mtrr_var_range *vr;
    835
    836	vr = &mtrr_state.var_ranges[reg];
    837
    838	local_irq_save(flags);
    839	prepare_set();
    840
    841	if (size == 0) {
    842		/*
    843		 * The invalid bit is kept in the mask, so we simply
    844		 * clear the relevant mask register to disable a range.
    845		 */
    846		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
    847		memset(vr, 0, sizeof(struct mtrr_var_range));
    848	} else {
    849		vr->base_lo = base << PAGE_SHIFT | type;
    850		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
    851		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
    852		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);
    853
    854		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
    855		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
    856	}
    857
    858	post_set();
    859	local_irq_restore(flags);
    860}
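       /*
        * Encoding example: programming a 64 MiB write-combining region at
        * 0xC0000000 amounts to generic_set_mtrr(reg, 0xC0000, 0x4000,
        * MTRR_TYPE_WRCOMB), with base and size given in pages.  With a 36-bit
        * address width this yields base_hi:base_lo = 0x0:0xC0000001 and
        * mask_hi:mask_lo = 0xF:0xFC000800: the mask covers bits 35:26 and
        * bit 11 (0x800) marks the range valid, the same layout that
        * generic_get_mtrr() decodes above.
        */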
    861
    862int generic_validate_add_page(unsigned long base, unsigned long size,
    863			      unsigned int type)
    864{
    865	unsigned long lbase, last;
    866
    867	/*
    868	 * For Intel PPro stepping <= 7, the range must be 4 MiB aligned
    869	 * and must not touch 0x70000000 -> 0x7003FFFF
    870	 */
    871	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
    872	    boot_cpu_data.x86_model == 1 &&
    873	    boot_cpu_data.x86_stepping <= 7) {
    874		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
    875			pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
    876			return -EINVAL;
    877		}
    878		if (!(base + size < 0x70000 || base > 0x7003F) &&
    879		    (type == MTRR_TYPE_WRCOMB
    880		     || type == MTRR_TYPE_WRBACK)) {
    881			pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
    882			return -EINVAL;
    883		}
    884	}
    885
    886	/*
    887	 * Check upper bits of base and last are equal and lower bits are 0
    888	 * for base and 1 for last
    889	 */
    890	last = base + size - 1;
    891	for (lbase = base; !(lbase & 1) && (last & 1);
    892	     lbase = lbase >> 1, last = last >> 1)
    893		;
    894	if (lbase != last) {
    895		pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
    896		return -EINVAL;
    897	}
    898	return 0;
    899}
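       /*
        * Example of the alignment loop: base = 0x4000, size = 0x4000 (pages,
        * i.e. a 64 MiB region at 64 MiB) gives last = 0x7fff; shifting both
        * right while lbase is even and last is odd ends with lbase == last,
        * so the region is accepted.  base = 0x4000, size = 0x3000 stops with
        * lbase != last and is rejected: the size must be a power of two and
        * the base must be aligned to it.
        */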
    900
    901static int generic_have_wrcomb(void)
    902{
    903	unsigned long config, dummy;
    904	rdmsr(MSR_MTRRcap, config, dummy);
    905	return config & (1 << 10);
    906}
    907
    908int positive_have_wrcomb(void)
    909{
    910	return 1;
    911}
    912
    913/*
    914 * Generic structure...
    915 */
    916const struct mtrr_ops generic_mtrr_ops = {
    917	.use_intel_if		= 1,
    918	.set_all		= generic_set_all,
    919	.get			= generic_get_mtrr,
    920	.get_free_region	= generic_get_free_region,
    921	.set			= generic_set_mtrr,
    922	.validate_add_page	= generic_validate_add_page,
    923	.have_wrcomb		= generic_have_wrcomb,
    924};