cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_gem_stolen.c (23497B)


/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/intel_region_lmem.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_reg.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
#include "intel_mchbar_regs.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (GRAPHICS_VER(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}
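
/*
 * Illustrative usage sketch, not part of the original file: a caller that
 * wants a 1 MiB chunk anywhere in the first 32 MiB of stolen could use the
 * range helper directly (names and sizes below are made up for the example):
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node_in_range(i915, &node, SZ_1M,
 *						   4096, 0, SZ_32M);
 *	if (err)
 *		return err;
 *	...use [node.start, node.start + node.size)...
 *	i915_gem_stolen_remove_node(i915, &node);
 */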

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}

static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at
	 * the end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (GRAPHICS_VER(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (GRAPHICS_VER(i915) == 4)
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	/*
	 * With stolen lmem, we don't need to check if the address range
	 * overlaps with the non-stolen system memory range, since lmem is
	 * local to the gpu.
	 */
	if (HAS_LMEM(i915))
		return 0;

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt, but this time requesting the region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI bus.
		 * This is a BIOS workaround: some BIOSes wrap stolen in the
		 * root PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There are also BIOSes with an off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash PCI bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && GRAPHICS_VER(i915) != 3) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}
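
/*
 * Worked example for the adjustment above (illustrative numbers only): with
 * stolen at [0x80000000, 0x8fffffff] and a 512 KiB GTT sitting at the very
 * top of that range, the chunk below the GTT (~255.5 MiB) is far larger
 * than the near-empty remainder above it, so dsm is trimmed to end where
 * the GTT begins and the debug messages report the adjusted range.
 */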

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, GRAPHICS_VER(i915) == 5,
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}
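
/*
 * Illustrative decode for the VLV case (made-up values): with stolen ending
 * at 0x7fffffff, stolen_top is 0x80000000; a decoded 1 MiB reserved size
 * therefore puts the inferred base at 0x80000000 - 0x100000 = 0x7ff00000,
 * even though the register itself carries no address bits.
 */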

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static int i915_gem_init_stolen(struct intel_memory_region *mem)
{
	struct drm_i915_private *i915 = mem->i915;
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (i915_vtd_active(i915) && GRAPHICS_VER(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&mem->region) == 0)
		return 0;

	i915->dsm = mem->region;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (GRAPHICS_VER(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		fallthrough;
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(GRAPHICS_VER(i915));
		fallthrough;
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/* Exclude the reserved region from driver use */
	mem->region.end = reserved_base - 1;
	mem->io_size = min(mem->io_size, resource_size(&mem->region));

	/*
	 * It is possible for the reserved area to end before the end of
	 * stolen memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm) >> 10,
		(u64)i915->stolen_usable_size >> 10);

	if (i915->stolen_usable_size == 0)
		return 0;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}
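
/*
 * Illustrative end state of init (made-up numbers): with dsm =
 * [0x80000000, 0x87ffffff] (128 MiB) and an 8 MiB reserved window at the
 * top, stolen_top = 0x88000000, reserved_base = 0x87800000,
 * reserved_total = 8 MiB and stolen_usable_size = 120 MiB; drm_mm then
 * hands out offsets in [0, 120 MiB) relative to dsm.start.
 */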

static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}
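
/*
 * Note on the helper above (debug builds only): each page of the stolen
 * range is bound in turn at the single reserved error_capture GGTT slot,
 * filled with the poison byte through a WC mapping, then replaced by the
 * next page. get_pages uses POISON_INUSE and put_pages POISON_FREE, so
 * stale stolen contents stand out in error captures.
 */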

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}
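
/*
 * Illustrative result (made-up numbers): for dsm.start = 0x80000000,
 * offset = 0x100000 and size = 64 KiB, the returned table holds a single
 * scatterlist entry with sg_dma_address() == 0x80100000 and
 * sg_dma_len() == 0x10000; no struct page ever backs the range.
 */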

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(to_gt(i915)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);
	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);

	i915_gem_object_release_memory_region(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
					   struct drm_i915_gem_object *obj,
					   struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	unsigned int cache_level;
	unsigned int flags;
	int err;

	/*
	 * Stolen objects are always physically contiguous since we just
	 * allocate one big block underneath using the drm_mm range allocator.
	 */
	flags = I915_BO_ALLOC_CONTIGUOUS;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class, flags);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (WARN_ON(!i915_gem_object_trylock(obj, NULL)))
		return -EBUSY;

	i915_gem_object_init_memory_region(obj, mem);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		i915_gem_object_release_memory_region(obj);
	i915_gem_object_unlock(obj);

	return err;
}

static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
					struct drm_i915_gem_object *obj,
					resource_size_t offset,
					resource_size_t size,
					resource_size_t page_size,
					unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	if (size == 0)
		return -EINVAL;

	/*
	 * With discrete devices, where we lack a mappable aperture, there is
	 * no way to ever access this memory on the CPU side.
	 */
	if (mem->type == INTEL_MEMORY_STOLEN_LOCAL && !mem->io_size &&
	    !(flags & I915_BO_ALLOC_GPU_ONLY))
		return -ENOSPC;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return -ENOMEM;

	if (offset != I915_BO_INVALID_OFFSET) {
		drm_dbg(&i915->drm,
			"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
			&offset, &size);

		stolen->start = offset;
		stolen->size = size;
		mutex_lock(&i915->mm.stolen_lock);
		ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
		mutex_unlock(&i915->mm.stolen_lock);
	} else {
		ret = i915_gem_stolen_insert_node(i915, stolen, size,
						  mem->min_page_size);
	}
	if (ret)
		goto err_free;

	ret = __i915_gem_object_create_stolen(mem, obj, stolen);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return ret;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
}
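
/*
 * Illustrative usage sketch, not taken from this file: a driver-internal
 * caller allocating a small scratch buffer from stolen might do:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_stolen(i915, SZ_4K);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	i915_gem_object_put(obj);
 */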

static int init_stolen_smem(struct intel_memory_region *mem)
{
	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem);
}

static int release_stolen_smem(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_smem_ops = {
	.init = init_stolen_smem,
	.release = release_stolen_smem,
	.init_object = _i915_gem_object_stolen_init,
};

static int init_stolen_lmem(struct intel_memory_region *mem)
{
	int err;

	if (GEM_WARN_ON(resource_size(&mem->region) == 0))
		return -ENODEV;

	/*
	 * TODO: For stolen lmem we mostly just care about populating the dsm
	 * related bits and setting up the drm_mm allocator for the range.
	 * Perhaps split up i915_gem_init_stolen() for this.
	 */
	err = i915_gem_init_stolen(mem);
	if (err)
		return err;

	if (mem->io_size && !io_mapping_init_wc(&mem->iomap,
						mem->io_start,
						mem->io_size)) {
		err = -EIO;
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	i915_gem_cleanup_stolen(mem->i915);
	return err;
}

static int release_stolen_lmem(struct intel_memory_region *mem)
{
	if (mem->io_size)
		io_mapping_fini(&mem->iomap);
	i915_gem_cleanup_stolen(mem->i915);
	return 0;
}

static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = {
	.init = init_stolen_lmem,
	.release = release_stolen_lmem,
	.init_object = _i915_gem_object_stolen_init,
};

struct intel_memory_region *
i915_gem_stolen_lmem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_uncore *uncore = &i915->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	resource_size_t dsm_size, dsm_base, lmem_size;
	struct intel_memory_region *mem;
	resource_size_t io_start, io_size;
	resource_size_t min_page_size;

	if (WARN_ON_ONCE(instance))
		return ERR_PTR(-ENODEV);

	/* Use DSM base address instead for stolen memory */
	dsm_base = intel_uncore_read64(uncore, GEN12_DSMBASE);
	if (IS_DG1(uncore->i915)) {
		lmem_size = pci_resource_len(pdev, 2);
		if (WARN_ON(lmem_size < dsm_base))
			return ERR_PTR(-ENODEV);
	} else {
		resource_size_t lmem_range;

		lmem_range = intel_gt_read_register(&i915->gt0, XEHPSDV_TILE0_ADDR_RANGE) & 0xFFFF;
		lmem_size = lmem_range >> XEHPSDV_TILE_LMEM_RANGE_SHIFT;
		lmem_size *= SZ_1G;
	}

	dsm_size = lmem_size - dsm_base;
	if (pci_resource_len(pdev, 2) < lmem_size) {
		io_start = 0;
		io_size = 0;
	} else {
		io_start = pci_resource_start(pdev, 2) + dsm_base;
		io_size = dsm_size;
	}

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
					      I915_GTT_PAGE_SIZE_4K;

	mem = intel_memory_region_create(i915, dsm_base, dsm_size,
					 min_page_size,
					 io_start, io_size,
					 type, instance,
					 &i915_region_stolen_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	/*
	 * TODO: consider creating common helper to just print all the
	 * interesting stuff from intel_memory_region, which we can use for all
	 * our probed regions.
	 */

	drm_dbg(&i915->drm, "Stolen Local memory IO start: %pa\n",
		&mem->io_start);
	drm_dbg(&i915->drm, "Stolen Local DSM base: %pa\n", &dsm_base);

	intel_memory_region_set_name(mem, "stolen-local");

	mem->private = true;

	return mem;
}
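
/*
 * Worked example (illustrative numbers, assuming the LMEM BAR covers all of
 * local memory): with an 8 GiB lmem_size and GEN12_DSMBASE reading
 * 0x1fc000000 (8 GiB - 64 MiB), dsm_size = 64 MiB and the region is
 * CPU-visible at pci_resource_start(pdev, 2) + dsm_base; if the BAR were
 * smaller than lmem, io_start/io_size would instead be left at 0.
 */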

struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
			   u16 instance)
{
	struct intel_memory_region *mem;

	mem = intel_memory_region_create(i915,
					 intel_graphics_stolen_res.start,
					 resource_size(&intel_graphics_stolen_res),
					 PAGE_SIZE, 0, 0, type, instance,
					 &i915_region_stolen_smem_ops);
	if (IS_ERR(mem))
		return mem;

	intel_memory_region_set_name(mem, "stolen-system");

	mem->private = true;
	return mem;
}

bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
{
	return obj->ops == &i915_gem_object_stolen_ops;
}