cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_region_lmem.c (4008B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_memory_region.h"
#include "intel_region_lmem.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"

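/* Undo region_lmem_init(): finalize the TTM manager, then drop the iomap. */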
static int
region_lmem_release(struct intel_memory_region *mem)
{
	int ret;

	ret = intel_region_ttm_fini(mem);
	io_mapping_fini(&mem->iomap);

	return ret;
}

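/*
 * Set up a write-combined io mapping over the region's CPU-visible window,
 * then let TTM take over management of the range.
 */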
static int
region_lmem_init(struct intel_memory_region *mem)
{
	int ret;

	if (!io_mapping_init_wc(&mem->iomap,
				mem->io_start,
				mem->io_size))
		return -EIO;

	ret = intel_region_ttm_init(mem);
	if (ret)
		goto out_no_buddy;

	return 0;

out_no_buddy:
	io_mapping_fini(&mem->iomap);

	return ret;
}

static const struct intel_memory_region_ops intel_region_lmem_ops = {
	.init = region_lmem_init,
	.release = region_lmem_release,
	.init_object = __i915_gem_ttm_object_init,
};

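/*
 * Early DG1 steppings (A0 up to, but not including, C0) need the first 1MiB
 * of local memory kept back; report that range so the caller can reserve it.
 */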
static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
				     u64 *start, u32 *size)
{
	if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0))
		return false;

	*start = 0;
	*size = SZ_1M;

	drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n",
		*start, *start + *size);

	return true;
}

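/* Carve the legacy low-memory range out of the region, if the hardware needs it. */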
static int reserve_lowmem_region(struct intel_uncore *uncore,
				 struct intel_memory_region *mem)
{
	u64 reserve_start;
	u32 reserve_size;
	int ret;

	if (!get_legacy_lowmem_region(uncore, &reserve_start, &reserve_size))
		return 0;

	ret = intel_memory_region_reserve(mem, reserve_start, reserve_size);
	if (ret)
		drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n");

	return ret;
}

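/*
 * Probe the size of device-local memory (LMEM): on flat-CCS platforms the
 * usable size is derived from the FLAT_CCS base register, otherwise stolen
 * memory starts at GSMBASE. The size may be further capped by the lmem_size
 * module parameter, while the CPU-visible io window is limited by BAR 2.
 * A TTM-backed memory region is then created and the legacy low-memory
 * range reserved where required.
 */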
static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct intel_memory_region *mem;
	resource_size_t min_page_size;
	resource_size_t io_start;
	resource_size_t io_size;
	resource_size_t lmem_size;
	int err;

	if (!IS_DGFX(i915))
		return ERR_PTR(-ENODEV);

	if (HAS_FLAT_CCS(i915)) {
		u64 tile_stolen, flat_ccs_base;

		lmem_size = pci_resource_len(pdev, 2);
		flat_ccs_base = intel_gt_read_register(gt, XEHPSDV_FLAT_CCS_BASE_ADDR);
		flat_ccs_base = (flat_ccs_base >> XEHPSDV_CCS_BASE_SHIFT) * SZ_64K;

		if (GEM_WARN_ON(lmem_size < flat_ccs_base))
			return ERR_PTR(-ENODEV);

		tile_stolen = lmem_size - flat_ccs_base;

		/* If the FLAT_CCS_BASE_ADDR register is not populated, flag an error */
		if (tile_stolen == lmem_size)
			drm_err(&i915->drm,
				"CCS_BASE_ADDR register did not have expected value\n");

		lmem_size -= tile_stolen;
	} else {
		/* Stolen starts from GSMBASE without CCS */
		lmem_size = intel_uncore_read64(&i915->uncore, GEN12_GSMBASE);
	}

	if (i915->params.lmem_size > 0) {
		lmem_size = min_t(resource_size_t, lmem_size,
				  mul_u32_u32(i915->params.lmem_size, SZ_1M));
	}

	io_start = pci_resource_start(pdev, 2);
	io_size = min(pci_resource_len(pdev, 2), lmem_size);
	if (!io_size)
		return ERR_PTR(-ENODEV);

	min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K :
						I915_GTT_PAGE_SIZE_4K;
	mem = intel_memory_region_create(i915,
					 0,
					 lmem_size,
					 min_page_size,
					 io_start,
					 io_size,
					 INTEL_MEMORY_LOCAL,
					 0,
					 &intel_region_lmem_ops);
	if (IS_ERR(mem))
		return mem;

	err = reserve_lowmem_region(uncore, mem);
	if (err)
		goto err_region_put;

	drm_dbg(&i915->drm, "Local memory: %pR\n", &mem->region);
	drm_dbg(&i915->drm, "Local memory IO start: %pa\n",
		&mem->io_start);
	drm_info(&i915->drm, "Local memory IO size: %pa\n",
		 &mem->io_size);
	drm_info(&i915->drm, "Local memory available: %pa\n",
		 &lmem_size);

	return mem;

err_region_put:
	intel_memory_region_destroy(mem);
	return ERR_PTR(err);
}

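/* Public entry point: set up the local-memory region for a GT. */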
struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
{
	return setup_lmem(gt);
}