cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mtrr.c (16376B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * vMTRR implementation
      4 *
      5 * Copyright (C) 2006 Qumranet, Inc.
      6 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
      7 * Copyright(C) 2015 Intel Corporation.
      8 *
      9 * Authors:
     10 *   Yaniv Kamay  <yaniv@qumranet.com>
     11 *   Avi Kivity   <avi@qumranet.com>
     12 *   Marcelo Tosatti <mtosatti@redhat.com>
     13 *   Paolo Bonzini <pbonzini@redhat.com>
     14 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
     15 */
     16
     17#include <linux/kvm_host.h>
     18#include <asm/mtrr.h>
     19
     20#include "cpuid.h"
     21#include "mmu.h"
     22
     23#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
     24#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
     25#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)
     26
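        /*
         * MSRs handled here: the variable-range MTRR base/mask pairs
         * (0x200 + 2n and 0x201 + 2n for n < KVM_NR_VAR_MTRR), the eleven
         * fixed-range MTRRs, MSR_MTRRdefType and MSR_IA32_CR_PAT.
         */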
     27static bool msr_mtrr_valid(unsigned msr)
     28{
     29	switch (msr) {
     30	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
     31	case MSR_MTRRfix64K_00000:
     32	case MSR_MTRRfix16K_80000:
     33	case MSR_MTRRfix16K_A0000:
     34	case MSR_MTRRfix4K_C0000:
     35	case MSR_MTRRfix4K_C8000:
     36	case MSR_MTRRfix4K_D0000:
     37	case MSR_MTRRfix4K_D8000:
     38	case MSR_MTRRfix4K_E0000:
     39	case MSR_MTRRfix4K_E8000:
     40	case MSR_MTRRfix4K_F0000:
     41	case MSR_MTRRfix4K_F8000:
     42	case MSR_MTRRdefType:
     43	case MSR_IA32_CR_PAT:
     44		return true;
     45	}
     46	return false;
     47}
     48
     49static bool valid_mtrr_type(unsigned t)
     50{
     51	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
     52}
     53
     54bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
     55{
     56	int i;
     57	u64 mask;
     58
     59	if (!msr_mtrr_valid(msr))
     60		return false;
     61
     62	if (msr == MSR_IA32_CR_PAT) {
     63		return kvm_pat_valid(data);
     64	} else if (msr == MSR_MTRRdefType) {
     65		if (data & ~0xcff)
     66			return false;
     67		return valid_mtrr_type(data & 0xff);
     68	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
     69		for (i = 0; i < 8 ; i++)
     70			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
     71				return false;
     72		return true;
     73	}
     74
     75	/* variable MTRRs */
     76	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));
     77
     78	mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
     79	if ((msr & 1) == 0) {
     80		/* MTRR base */
     81		if (!valid_mtrr_type(data & 0xff))
     82			return false;
     83		mask |= 0xf00;
     84	} else
     85		/* MTRR mask */
     86		mask |= 0x7ff;
     87
     88	return (data & mask) == 0;
     89}
     90EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
     91
     92static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
     93{
     94	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
     95}
     96
     97static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
     98{
     99	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
    100}
    101
    102static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
    103{
    104	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
    105}
    106
    107static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
    108{
    109	/*
    110	 * Intel SDM 11.11.2.2: all MTRRs are disabled when
    111	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
    112	 * memory type is applied to all of physical memory.
    113	 *
    114	 * However, virtual machines can be run with CPUID such that
    115	 * there are no MTRRs.  In that case, the firmware will never
    116	 * enable MTRRs and it is obviously undesirable to run the
     117	 * guest entirely with UC memory; use WB instead.
    118	 */
    119	if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
    120		return MTRR_TYPE_UNCACHABLE;
    121	else
    122		return MTRR_TYPE_WRBACK;
    123}
    124
     125/*
     126 * Three terms are used in the following code:
     127 * - segment: an address segment covered by the fixed MTRRs.
     128 * - unit: one MSR entry within a segment.
     129 * - range: a block of memory covered by a single cache type.
     130 */
    131struct fixed_mtrr_segment {
    132	u64 start;
    133	u64 end;
    134
    135	int range_shift;
    136
    137	/* the start position in kvm_mtrr.fixed_ranges[]. */
    138	int range_start;
    139};
    140
    141static struct fixed_mtrr_segment fixed_seg_table[] = {
    142	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
    143	{
    144		.start = 0x0,
    145		.end = 0x80000,
    146		.range_shift = 16, /* 64K */
    147		.range_start = 0,
    148	},
    149
    150	/*
    151	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
    152	 * 16K fixed mtrr.
    153	 */
    154	{
    155		.start = 0x80000,
    156		.end = 0xc0000,
    157		.range_shift = 14, /* 16K */
    158		.range_start = 8,
    159	},
    160
    161	/*
    162	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
    163	 * 4K fixed mtrr.
    164	 */
    165	{
    166		.start = 0xc0000,
    167		.end = 0x100000,
     168		.range_shift = 12, /* 4K */
    169		.range_start = 24,
    170	}
    171};
    172
     173/*
     174 * Each unit is covered by one MSR; an MSR entry contains 8 ranges,
     175 * so the unit size is always 8 * 2^range_shift.
     176 */
    177static u64 fixed_mtrr_seg_unit_size(int seg)
    178{
    179	return 8 << fixed_seg_table[seg].range_shift;
    180}
    181
    182static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
    183{
    184	switch (msr) {
    185	case MSR_MTRRfix64K_00000:
    186		*seg = 0;
    187		*unit = 0;
    188		break;
    189	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
    190		*seg = 1;
    191		*unit = array_index_nospec(
    192			msr - MSR_MTRRfix16K_80000,
    193			MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
    194		break;
    195	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
    196		*seg = 2;
    197		*unit = array_index_nospec(
    198			msr - MSR_MTRRfix4K_C0000,
    199			MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
    200		break;
    201	default:
    202		return false;
    203	}
    204
    205	return true;
    206}
    207
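        /*
         * Translate (seg, unit) into the guest physical range it covers.
         * For example seg 1, unit 1 (MSR_MTRRfix16K_A0000) covers the
         * 128K range [0xa0000, 0xc0000).
         */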
    208static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
    209{
    210	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
    211	u64 unit_size = fixed_mtrr_seg_unit_size(seg);
    212
    213	*start = mtrr_seg->start + unit * unit_size;
    214	*end = *start + unit_size;
    215	WARN_ON(*end > mtrr_seg->end);
    216}
    217
    218static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
    219{
    220	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
    221
    222	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
    223		> mtrr_seg->end);
    224
    225	/* each unit has 8 ranges. */
    226	return mtrr_seg->range_start + 8 * unit;
    227}
    228
    229static int fixed_mtrr_seg_end_range_index(int seg)
    230{
    231	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
    232	int n;
    233
    234	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
    235	return mtrr_seg->range_start + n - 1;
    236}
    237
    238static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
    239{
    240	int seg, unit;
    241
    242	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
    243		return false;
    244
    245	fixed_mtrr_seg_unit_range(seg, unit, start, end);
    246	return true;
    247}
    248
    249static int fixed_msr_to_range_index(u32 msr)
    250{
    251	int seg, unit;
    252
    253	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
    254		return -1;
    255
    256	return fixed_mtrr_seg_unit_range_index(seg, unit);
    257}
    258
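        /*
         * Map a guest physical address to its fixed-MTRR segment, or
         * return -1 if the address lies above the fixed range (1MB).
         */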
    259static int fixed_mtrr_addr_to_seg(u64 addr)
    260{
    261	struct fixed_mtrr_segment *mtrr_seg;
    262	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);
    263
    264	for (seg = 0; seg < seg_num; seg++) {
    265		mtrr_seg = &fixed_seg_table[seg];
    266		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
    267			return seg;
    268	}
    269
    270	return -1;
    271}
    272
    273static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
    274{
    275	struct fixed_mtrr_segment *mtrr_seg;
    276	int index;
    277
    278	mtrr_seg = &fixed_seg_table[seg];
    279	index = mtrr_seg->range_start;
    280	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
    281	return index;
    282}
    283
    284static u64 fixed_mtrr_range_end_addr(int seg, int index)
    285{
    286	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
    287	int pos = index - mtrr_seg->range_start;
    288
    289	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
    290}
    291
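        /*
         * Compute the [start, end) guest physical range described by a
         * variable MTRR: start is the page-aligned base, and the mask's
         * clear bits act as don't-care address bits, so
         * end = (start | ~mask) + 1.
         */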
    292static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
    293{
    294	u64 mask;
    295
    296	*start = range->base & PAGE_MASK;
    297
    298	mask = range->mask & PAGE_MASK;
    299
    300	/* This cannot overflow because writing to the reserved bits of
    301	 * variable MTRRs causes a #GP.
    302	 */
    303	*end = (*start | ~mask) + 1;
    304}
    305
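        /*
         * Changes to the MTRRs are only reflected in the TDP mappings when
         * the VM has non-coherent DMA; in that case zap the affected GFN
         * range so the SPTEs are rebuilt with the new memory type.
         */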
    306static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
    307{
    308	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
    309	gfn_t start, end;
    310	int index;
    311
    312	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
    313	      !kvm_arch_has_noncoherent_dma(vcpu->kvm))
    314		return;
    315
    316	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
    317		return;
    318
    319	/* fixed MTRRs. */
    320	if (fixed_msr_to_range(msr, &start, &end)) {
    321		if (!fixed_mtrr_is_enabled(mtrr_state))
    322			return;
    323	} else if (msr == MSR_MTRRdefType) {
    324		start = 0x0;
    325		end = ~0ULL;
    326	} else {
    327		/* variable range MTRRs. */
    328		index = (msr - 0x200) / 2;
    329		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
    330	}
    331
    332	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
    333}
    334
    335static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
    336{
    337	return (range->mask & (1 << 11)) != 0;
    338}
    339
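        /*
         * Update one half of a variable MTRR pair (even MSR = base,
         * odd MSR = mask) and keep the list of valid ranges on
         * mtrr_state->head sorted by base address.
         */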
    340static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
    341{
    342	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
    343	struct kvm_mtrr_range *tmp, *cur;
    344	int index, is_mtrr_mask;
    345
    346	index = (msr - 0x200) / 2;
    347	is_mtrr_mask = msr - 0x200 - 2 * index;
    348	cur = &mtrr_state->var_ranges[index];
    349
    350	/* remove the entry if it's in the list. */
    351	if (var_mtrr_range_is_valid(cur))
    352		list_del(&mtrr_state->var_ranges[index].node);
    353
    354	/*
    355	 * Set all illegal GPA bits in the mask, since those bits must
    356	 * implicitly be 0.  The bits are then cleared when reading them.
    357	 */
    358	if (!is_mtrr_mask)
    359		cur->base = data;
    360	else
    361		cur->mask = data | kvm_vcpu_reserved_gpa_bits_raw(vcpu);
    362
    363	/* add it to the list if it's enabled. */
    364	if (var_mtrr_range_is_valid(cur)) {
    365		list_for_each_entry(tmp, &mtrr_state->head, node)
    366			if (cur->base >= tmp->base)
    367				break;
    368		list_add_tail(&cur->node, &tmp->node);
    369	}
    370}
    371
    372int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
    373{
    374	int index;
    375
    376	if (!kvm_mtrr_valid(vcpu, msr, data))
    377		return 1;
    378
    379	index = fixed_msr_to_range_index(msr);
    380	if (index >= 0)
    381		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
    382	else if (msr == MSR_MTRRdefType)
    383		vcpu->arch.mtrr_state.deftype = data;
    384	else if (msr == MSR_IA32_CR_PAT)
    385		vcpu->arch.pat = data;
    386	else
    387		set_var_mtrr_msr(vcpu, msr, data);
    388
    389	update_mtrr(vcpu, msr);
    390	return 0;
    391}
    392
    393int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
    394{
    395	int index;
    396
     397	/* MSR_MTRRcap is a read-only MSR. */
    398	if (msr == MSR_MTRRcap) {
    399		/*
    400		 * SMRR = 0
    401		 * WC = 1
    402		 * FIX = 1
    403		 * VCNT = KVM_NR_VAR_MTRR
    404		 */
    405		*pdata = 0x500 | KVM_NR_VAR_MTRR;
    406		return 0;
    407	}
    408
    409	if (!msr_mtrr_valid(msr))
    410		return 1;
    411
    412	index = fixed_msr_to_range_index(msr);
    413	if (index >= 0)
    414		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
    415	else if (msr == MSR_MTRRdefType)
    416		*pdata = vcpu->arch.mtrr_state.deftype;
    417	else if (msr == MSR_IA32_CR_PAT)
    418		*pdata = vcpu->arch.pat;
    419	else {	/* Variable MTRRs */
    420		int is_mtrr_mask;
    421
    422		index = (msr - 0x200) / 2;
    423		is_mtrr_mask = msr - 0x200 - 2 * index;
    424		if (!is_mtrr_mask)
    425			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
    426		else
    427			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
    428
    429		*pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu);
    430	}
    431
    432	return 0;
    433}
    434
    435void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
    436{
    437	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
    438}
    439
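        /*
         * Iterator state for walking every memory type that applies to
         * [start, end): the fixed-range MTRRs are visited first (when
         * enabled and start lies below 1MB), then the variable ranges.
         */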
    440struct mtrr_iter {
    441	/* input fields. */
    442	struct kvm_mtrr *mtrr_state;
    443	u64 start;
    444	u64 end;
    445
    446	/* output fields. */
    447	int mem_type;
    448	/* mtrr is completely disabled? */
    449	bool mtrr_disabled;
    450	/* [start, end) is not fully covered in MTRRs? */
    451	bool partial_map;
    452
    453	/* private fields. */
    454	union {
    455		/* used for fixed MTRRs. */
    456		struct {
    457			int index;
    458			int seg;
    459		};
    460
    461		/* used for var MTRRs. */
    462		struct {
    463			struct kvm_mtrr_range *range;
     464			/* the maximum address covered so far by var MTRRs. */
    465			u64 start_max;
    466		};
    467	};
    468
    469	bool fixed;
    470};
    471
    472static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
    473{
    474	int seg, index;
    475
    476	if (!fixed_mtrr_is_enabled(iter->mtrr_state))
    477		return false;
    478
    479	seg = fixed_mtrr_addr_to_seg(iter->start);
    480	if (seg < 0)
    481		return false;
    482
    483	iter->fixed = true;
    484	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
    485	iter->index = index;
    486	iter->seg = seg;
    487	return true;
    488}
    489
    490static bool match_var_range(struct mtrr_iter *iter,
    491			    struct kvm_mtrr_range *range)
    492{
    493	u64 start, end;
    494
    495	var_mtrr_range(range, &start, &end);
    496	if (!(start >= iter->end || end <= iter->start)) {
    497		iter->range = range;
    498
    499		/*
     500		 * The function is called while walking kvm_mtrr.head.  This
     501		 * range has the minimum base address that overlaps
     502		 * [iter->start_max, iter->end).
    503		 */
    504		iter->partial_map |= iter->start_max < start;
    505
     506		/* update the maximum address covered so far. */
    507		iter->start_max = max(iter->start_max, end);
    508		return true;
    509	}
    510
    511	return false;
    512}
    513
    514static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
    515{
    516	struct kvm_mtrr *mtrr_state = iter->mtrr_state;
    517
    518	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
    519		if (match_var_range(iter, iter->range))
    520			return;
    521
    522	iter->range = NULL;
    523	iter->partial_map |= iter->start_max < iter->end;
    524}
    525
    526static void mtrr_lookup_var_start(struct mtrr_iter *iter)
    527{
    528	struct kvm_mtrr *mtrr_state = iter->mtrr_state;
    529
    530	iter->fixed = false;
    531	iter->start_max = iter->start;
    532	iter->range = NULL;
    533	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
    534
    535	__mtrr_lookup_var_next(iter);
    536}
    537
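        /*
         * Advance to the next fixed range: stop once iter->end has been
         * reached, and switch to the variable-range walk after the last
         * fixed range.
         */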
    538static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
    539{
    540	/* terminate the lookup. */
    541	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
    542		iter->fixed = false;
    543		iter->range = NULL;
    544		return;
    545	}
    546
    547	iter->index++;
    548
     549	/* all fixed MTRRs have been looked up; switch to variable MTRRs. */
    550	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
    551		return mtrr_lookup_var_start(iter);
    552
    553	/* switch to next segment. */
    554	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
    555		iter->seg++;
    556}
    557
    558static void mtrr_lookup_var_next(struct mtrr_iter *iter)
    559{
    560	__mtrr_lookup_var_next(iter);
    561}
    562
    563static void mtrr_lookup_start(struct mtrr_iter *iter)
    564{
    565	if (!mtrr_is_enabled(iter->mtrr_state)) {
    566		iter->mtrr_disabled = true;
    567		return;
    568	}
    569
    570	if (!mtrr_lookup_fixed_start(iter))
    571		mtrr_lookup_var_start(iter);
    572}
    573
    574static void mtrr_lookup_init(struct mtrr_iter *iter,
    575			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
    576{
    577	iter->mtrr_state = mtrr_state;
    578	iter->start = start;
    579	iter->end = end;
    580	iter->mtrr_disabled = false;
    581	iter->partial_map = false;
    582	iter->fixed = false;
    583	iter->range = NULL;
    584
    585	mtrr_lookup_start(iter);
    586}
    587
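        /*
         * Fetch the memory type at the iterator's current position;
         * returns false once the walk has finished.
         */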
    588static bool mtrr_lookup_okay(struct mtrr_iter *iter)
    589{
    590	if (iter->fixed) {
    591		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
    592		return true;
    593	}
    594
    595	if (iter->range) {
    596		iter->mem_type = iter->range->base & 0xff;
    597		return true;
    598	}
    599
    600	return false;
    601}
    602
    603static void mtrr_lookup_next(struct mtrr_iter *iter)
    604{
    605	if (iter->fixed)
    606		mtrr_lookup_fixed_next(iter);
    607	else
    608		mtrr_lookup_var_next(iter);
    609}
    610
    611#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
    612	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
    613	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
    614
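        /*
         * Resolve the effective MTRR memory type for a single guest page,
         * applying the SDM precedence rules when several ranges overlap.
         */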
    615u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
    616{
    617	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
    618	struct mtrr_iter iter;
    619	u64 start, end;
    620	int type = -1;
    621	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
    622			       | (1 << MTRR_TYPE_WRTHROUGH);
    623
    624	start = gfn_to_gpa(gfn);
    625	end = start + PAGE_SIZE;
    626
    627	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
    628		int curr_type = iter.mem_type;
    629
    630		/*
    631		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
    632		 * Precedences.
    633		 */
    634
    635		if (type == -1) {
    636			type = curr_type;
    637			continue;
    638		}
    639
    640		/*
    641		 * If two or more variable memory ranges match and the
    642		 * memory types are identical, then that memory type is
    643		 * used.
    644		 */
    645		if (type == curr_type)
    646			continue;
    647
    648		/*
    649		 * If two or more variable memory ranges match and one of
     650		 * the memory types is UC, the UC memory type is used.
    651		 */
    652		if (curr_type == MTRR_TYPE_UNCACHABLE)
    653			return MTRR_TYPE_UNCACHABLE;
    654
    655		/*
    656		 * If two or more variable memory ranges match and the
    657		 * memory types are WT and WB, the WT memory type is used.
    658		 */
    659		if (((1 << type) & wt_wb_mask) &&
    660		      ((1 << curr_type) & wt_wb_mask)) {
    661			type = MTRR_TYPE_WRTHROUGH;
    662			continue;
    663		}
    664
    665		/*
    666		 * For overlaps not defined by the above rules, processor
    667		 * behavior is undefined.
    668		 */
    669
    670		/* We use WB for this undefined behavior. :( */
    671		return MTRR_TYPE_WRBACK;
    672	}
    673
    674	if (iter.mtrr_disabled)
    675		return mtrr_disabled_type(vcpu);
    676
    677	/* not contained in any MTRRs. */
    678	if (type == -1)
    679		return mtrr_default_type(mtrr_state);
    680
    681	/*
     682	 * We only checked a single page, so it cannot be partially
     683	 * covered by MTRRs.
    684	 */
    685	WARN_ON(iter.partial_map);
    686
    687	return type;
    688}
    689EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
    690
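        /*
         * Check that the whole [gfn, gfn + page_num) range is mapped with
         * a single, consistent memory type.
         */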
    691bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
    692					  int page_num)
    693{
    694	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
    695	struct mtrr_iter iter;
    696	u64 start, end;
    697	int type = -1;
    698
    699	start = gfn_to_gpa(gfn);
    700	end = gfn_to_gpa(gfn + page_num);
    701	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
    702		if (type == -1) {
    703			type = iter.mem_type;
    704			continue;
    705		}
    706
    707		if (type != iter.mem_type)
    708			return false;
    709	}
    710
    711	if (iter.mtrr_disabled)
    712		return true;
    713
    714	if (!iter.partial_map)
    715		return true;
    716
    717	if (type == -1)
    718		return true;
    719
    720	return type == mtrr_default_type(mtrr_state);
    721}