cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

intel_gt_gmch.c (17110B)


      1// SPDX-License-Identifier: MIT
      2/*
      3 * Copyright © 2022 Intel Corporation
      4 */
      5
      6#include <drm/intel-gtt.h>
      7#include <drm/i915_drm.h>
      8
      9#include <linux/agp_backend.h>
     10#include <linux/stop_machine.h>
     11
     12#include "i915_drv.h"
     13#include "intel_gt_gmch.h"
     14#include "intel_gt_regs.h"
     15#include "intel_gt.h"
     16#include "i915_utils.h"
     17
     18#include "gen8_ppgtt.h"
     19
/*
 * Argument bundle for bxt_vtd_ggtt_insert_page__cb(), marshalled through
 * stop_machine()'s single void * argument.
 */
struct insert_page {
	struct i915_address_space *vm;	/* GGTT address space to update */
	dma_addr_t addr;		/* DMA address of the backing page */
	u64 offset;			/* byte offset into the GGTT */
	enum i915_cache_level level;	/* cacheability for the PTE */
};
     26
/* Write a single 64b gen8+ PTE into the MMIO-mapped GTT (ggtt->gsm). */
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}
     31
/*
 * Intentional no-op clear_range: used when stale GGTT PTEs are harmless,
 * so we skip the PTE writes entirely (see the probe functions, which only
 * install a real clear_range when a VT-d workaround requires it).
 */
static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}
     36
     37static u64 snb_pte_encode(dma_addr_t addr,
     38			  enum i915_cache_level level,
     39			  u32 flags)
     40{
     41	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
     42
     43	switch (level) {
     44	case I915_CACHE_L3_LLC:
     45	case I915_CACHE_LLC:
     46		pte |= GEN6_PTE_CACHE_LLC;
     47		break;
     48	case I915_CACHE_NONE:
     49		pte |= GEN6_PTE_UNCACHED;
     50		break;
     51	default:
     52		MISSING_CASE(level);
     53	}
     54
     55	return pte;
     56}
     57
     58static u64 ivb_pte_encode(dma_addr_t addr,
     59			  enum i915_cache_level level,
     60			  u32 flags)
     61{
     62	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
     63
     64	switch (level) {
     65	case I915_CACHE_L3_LLC:
     66		pte |= GEN7_PTE_CACHE_L3_LLC;
     67		break;
     68	case I915_CACHE_LLC:
     69		pte |= GEN6_PTE_CACHE_LLC;
     70		break;
     71	case I915_CACHE_NONE:
     72		pte |= GEN6_PTE_UNCACHED;
     73		break;
     74	default:
     75		MISSING_CASE(level);
     76	}
     77
     78	return pte;
     79}
     80
     81static u64 byt_pte_encode(dma_addr_t addr,
     82			  enum i915_cache_level level,
     83			  u32 flags)
     84{
     85	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
     86
     87	if (!(flags & PTE_READ_ONLY))
     88		pte |= BYT_PTE_WRITEABLE;
     89
     90	if (level != I915_CACHE_NONE)
     91		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
     92
     93	return pte;
     94}
     95
     96static u64 hsw_pte_encode(dma_addr_t addr,
     97			  enum i915_cache_level level,
     98			  u32 flags)
     99{
    100	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
    101
    102	if (level != I915_CACHE_NONE)
    103		pte |= HSW_WB_LLC_AGE3;
    104
    105	return pte;
    106}
    107
    108static u64 iris_pte_encode(dma_addr_t addr,
    109			   enum i915_cache_level level,
    110			   u32 flags)
    111{
    112	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
    113
    114	switch (level) {
    115	case I915_CACHE_NONE:
    116		break;
    117	case I915_CACHE_WT:
    118		pte |= HSW_WT_ELLC_LLC_AGE3;
    119		break;
    120	default:
    121		pte |= HSW_WB_ELLC_LLC_AGE3;
    122		break;
    123	}
    124
    125	return pte;
    126}
    127
    128static void gen5_ggtt_insert_page(struct i915_address_space *vm,
    129				  dma_addr_t addr,
    130				  u64 offset,
    131				  enum i915_cache_level cache_level,
    132				  u32 unused)
    133{
    134	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
    135		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
    136
    137	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
    138}
    139
/*
 * Write one gen6 GGTT PTE for @addr at @offset and flush so the GPU sees
 * the update immediately.
 */
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	/* Index the MMIO-mapped PTE array by GTT page. */
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}
    154
/*
 * Write one gen8+ (64b) GGTT PTE for @addr at @offset and flush so the GPU
 * sees the update immediately.
 */
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	/* Index the MMIO-mapped PTE array by GTT page. */
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));

	ggtt->invalidate(ggtt);
}
    169
    170static void gen5_ggtt_insert_entries(struct i915_address_space *vm,
    171				     struct i915_vma_resource *vma_res,
    172				     enum i915_cache_level cache_level,
    173				     u32 unused)
    174{
    175	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
    176		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
    177
    178	intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
    179				    flags);
    180}
    181
/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma_resource *vma_res,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *gte;
	gen6_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/* First PTE of the vma, and one past the last PTE of its node. */
	gte = (gen6_pte_t __iomem *)ggtt->gsm;
	gte += vma_res->start / I915_GTT_PAGE_SIZE;
	end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;

	/* One PTE per backing page from the scatterlist. */
	for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
		iowrite32(vm->pte_encode(addr, level, flags), gte++);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		iowrite32(vm->scratch[0]->encode, gte++);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}
    217
/*
 * Gen8+ variant of gen6_ggtt_insert_entries(): bind every page of a vma
 * into the global GTT with 64b PTEs, padding the remainder of the node
 * with the scratch PTE before invalidating.
 */
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma_resource *vma_res,
				     enum i915_cache_level level,
				     u32 flags)
{
	/* Encode once with a zero address, then OR each page address in. */
	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *gte;
	gen8_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	/* First PTE of the vma, and one past the last PTE of its node. */
	gte = (gen8_pte_t __iomem *)ggtt->gsm;
	gte += vma_res->start / I915_GTT_PAGE_SIZE;
	end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
		gen8_set_pte(gte++, pte_encode | addr);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}
    253
static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}
    265
    266static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
    267{
    268	struct insert_page *arg = _arg;
    269
    270	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
    271	bxt_vtd_ggtt_wa(arg->vm);
    272
    273	return 0;
    274}
    275
    276static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
    277					  dma_addr_t addr,
    278					  u64 offset,
    279					  enum i915_cache_level level,
    280					  u32 unused)
    281{
    282	struct insert_page arg = { vm, addr, offset, level };
    283
    284	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
    285}
    286
    287static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
    288{
    289	struct insert_entries *arg = _arg;
    290
    291	gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
    292	bxt_vtd_ggtt_wa(arg->vm);
    293
    294	return 0;
    295}
    296
    297static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
    298					     struct i915_vma_resource *vma_res,
    299					     enum i915_cache_level level,
    300					     u32 flags)
    301{
    302	struct insert_entries arg = { vm, vma_res, level, flags };
    303
    304	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
    305}
    306
/* Flush CPU writes to GMCH-managed graphics memory on gen5. */
void intel_gt_gmch_gen5_chipset_flush(struct intel_gt *gt)
{
	intel_gtt_chipset_flush();
}
    311
/* ggtt->invalidate hook for GMCH platforms: delegate to the chipset flush. */
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gtt_chipset_flush();
}
    316
    317static void gen5_ggtt_clear_range(struct i915_address_space *vm,
    318					 u64 start, u64 length)
    319{
    320	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
    321}
    322
/*
 * Point every gen6 GGTT PTE in [start, start + length) at the scratch page,
 * clamping to the end of the GTT if the request overruns it.
 */
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	/*
	 * NOTE(review): num_entries (unsigned) vs max_entries (int) — if
	 * first_entry ever exceeded the total entries, max_entries would go
	 * negative and convert to a huge unsigned value here; presumably
	 * callers never pass such a range — confirm.
	 */
	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->scratch[0]->encode;
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}
    343
/*
 * Point every gen8+ GGTT PTE in [start, start + length) at the scratch page,
 * clamping to the end of the GTT if the request overruns it.
 */
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	/* Same signed/unsigned clamp caveat as gen6_ggtt_clear_range(). */
	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}
    364
/* vm.cleanup hook for gen5: release the GMCH/intel-gtt resources. */
static void gen5_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}
    369
/*
 * vm.cleanup hook for gen6+: unmap the GTT MMIO window set up in
 * ggtt_probe_common() and free the scratch page.
 */
static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	free_scratch(vm);
}
    377
    378/*
    379 * Certain Gen5 chipsets require idling the GPU before
    380 * unmapping anything from the GTT when VT-d is enabled.
    381 */
    382static bool needs_idle_maps(struct drm_i915_private *i915)
    383{
    384	/*
    385	 * Query intel_iommu to see if we need the workaround. Presumably that
    386	 * was loaded first.
    387	 */
    388	if (!i915_vtd_active(i915))
    389		return false;
    390
    391	if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
    392		return true;
    393
    394	if (GRAPHICS_VER(i915) == 12)
    395		return true; /* XXX DMAR fault reason 7 */
    396
    397	return false;
    398}
    399
    400static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
    401{
    402	/*
    403	 * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
    404	 * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
    405	 */
    406	GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
    407	return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
    408}
    409
    410static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
    411{
    412	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
    413	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
    414	return snb_gmch_ctl << 20;
    415}
    416
/*
 * Decode the GGMS field of the BDW GMCH control word into a GTT size in
 * bytes. On gen8+ the field is a power-of-two exponent (size = 2^GGMS MiB),
 * with 0 meaning no GTT.
 */
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}
    432
/* GTTADR occupies the second half of GTTMMADR (see gen6_gttmmadr_size()). */
static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
{
	return gen6_gttmmadr_size(i915) / 2;
}
    437
/*
 * Common tail of gen6/gen8 GGTT probing: map the GTT PTE window (gsm) from
 * BAR 0, set up the scratch page and pre-encode its PTE.
 *
 * Returns 0 on success, -ENOMEM if the PTE window cannot be mapped, or the
 * error from setup_scratch_page().
 */
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	phys_addr_t phys_addr;
	u32 pte_flags;
	int ret;

	GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
	phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);

	/*
	 * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
		ggtt->gsm = ioremap(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	kref_init(&ggtt->vm.resv_ref);
	ret = setup_scratch_page(&ggtt->vm);
	if (ret) {
		drm_err(&i915->drm, "Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	pte_flags = 0;
	if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
		pte_flags |= PTE_LM;

	/* Cache the scratch PTE so clear_range can reuse it cheaply. */
	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, pte_flags);

	return 0;
}
    484
/*
 * Probe a gen5 (GMCH-managed) GGTT: hand off to the intel-gtt/AGP layer,
 * read back the aperture geometry, and install the gen5 vm/invalidate ops.
 *
 * Returns 0 on success or -EIO if the GMCH probe fails.
 */
int intel_gt_gmch_gen5_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	/* intel_gmch_probe() returns nonzero on success. */
	ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr =
		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	/* See needs_idle_maps(): VT-d quirk on Ironlake mobile and gen12. */
	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

	ggtt->vm.insert_page = gen5_ggtt_insert_page;
	ggtt->vm.insert_entries = gen5_ggtt_insert_entries;
	ggtt->vm.clear_range = gen5_ggtt_clear_range;
	ggtt->vm.cleanup = gen5_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma    = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma  = intel_ggtt_unbind_vma;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}
    527
/*
 * Probe a gen6/gen7 GGTT: read the aperture from BAR 2, size the GTT from
 * the GMCH control word, install the gen6 vm ops and pick the platform's
 * PTE encoder, then finish via ggtt_probe_common().
 *
 * Returns 0 on success, -ENXIO for an implausible GMADR size, or the error
 * from ggtt_probe_common().
 */
int intel_gt_gmch_gen6_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->gmadr = intel_pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/*
	 * 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
			&ggtt->mappable_end);
		return -ENXIO;
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	/* Only clear stale PTEs when a VT-d scanout workaround demands it. */
	ggtt->vm.clear_range = nop_clear_range;
	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	/* Pick the PTE encoder matching this platform's cache-bit layout. */
	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (GRAPHICS_VER(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;

	ggtt->vm.vma_ops.bind_vma    = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma  = intel_ggtt_unbind_vma;

	return ggtt_probe_common(ggtt, size);
}
    581
    582static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
    583{
    584	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
    585	gmch_ctrl &= SNB_GMCH_GGMS_MASK;
    586
    587	if (gmch_ctrl)
    588		return 1 << (20 + gmch_ctrl);
    589
    590	return 0;
    591}
    592
/*
 * Probe a gen8+ GGTT: size the GTT from the GMCH control word (CHV has its
 * own encoding), install the gen8 vm ops — swapped for the stop_machine()
 * variants where the VT-d workaround applies — set up private PAT, then
 * finish via ggtt_probe_common().
 */
int intel_gt_gmch_gen8_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	if (!HAS_LMEM(i915)) {
		ggtt->gmadr = intel_pci_resource(pdev, 2);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
	ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;

	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	/* Only clear stale PTEs when a VT-d scanout workaround demands it. */
	ggtt->vm.clear_range = nop_clear_range;
	if (intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen8_ggtt_clear_range;

	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

	/*
	 * Serialize GTT updates with aperture access on BXT if VT-d is on,
	 * and always on CHV.
	 */
	if (intel_vm_no_concurrent_access_wa(i915)) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma    = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma  = intel_ggtt_unbind_vma;

	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

	setup_private_pat(ggtt->vm.gt->uncore);

	return ggtt_probe_common(ggtt, size);
}
    647
    648int intel_gt_gmch_gen5_enable_hw(struct drm_i915_private *i915)
    649{
    650	if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
    651		return -EIO;
    652
    653	return 0;
    654}