cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_gem_pages.c (17373B)


/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

#include "gt/intel_gt.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		WARN_ON_ONCE(IS_DGFX(i915));
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}

	if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}
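
/*
 * Illustrative sketch only, not part of the original file: the page-size
 * derivation above, isolated so the "phys=2G => 1G, 2M, 64K or 4K" example
 * in the comment can be followed on its own. Given the sizes actually
 * present in the sg list (phys) and the platform-supported sizes
 * (supported), every supported size no larger than the largest phys chunk
 * is kept.
 */
static inline unsigned int __maybe_unused
example_supported_sg_sizes(unsigned long supported, unsigned int phys)
{
	unsigned int sg = 0;
	int i;

	/* same bit arithmetic as the loop in __i915_gem_object_set_pages() */
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (phys & ~0u << i)
			sg |= BIT(i);
	}
	return sg;
}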

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}
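
/*
 * Usage sketch, illustration only (not part of the original file): pins
 * taken via i915_gem_object_pin_pages()/..._unlocked() are reference
 * counted and each must be balanced by i915_gem_object_unpin_pages()
 * once the backing pages are no longer needed. The helper name below is
 * hypothetical and only demonstrates the pairing.
 */
#if 0
static int example_with_pinned_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		return err;

	/* ... access the backing store, e.g. obj->mm.pages ... */

	i915_gem_object_unpin_pages(obj);
	return 0;
}
#endif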

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	if (obj->ops->truncate)
		return obj->ops->truncate(obj);

	return 0;
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	if (!i915_gem_object_has_self_managed_shrink_list(obj))
		i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		intel_wakeref_t wakeref;

		with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
			intel_gt_invalidate_tlbs(to_gt(i915));
	}

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTEs (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	GEM_BUG_ON(type != I915_MAP_WC);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}
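
/*
 * Illustrative sketch only, not part of the original file: the pfn
 * computation used in the loop above, isolated. For local memory the CPU
 * reaches a page through the BAR, so the pfn is the region's iomap base
 * plus the page's offset within the region (here already folded into
 * 'iomap' as iomap.base - region.start), shifted down to a frame number.
 */
static inline unsigned long __maybe_unused
example_lmem_pfn(resource_size_t iomap, dma_addr_t addr)
{
	return (iomap + addr) >> PAGE_SHIFT;
}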

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return ERR_PTR(-ENXIO);

	if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return ERR_PTR(-EINVAL);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	/*
	 * For discrete, our CPU mappings need to be consistent in order to
	 * function correctly on !x86. When mapping things through TTM, we use
	 * the same rules to determine the caching type.
	 *
	 * The caching rules, starting from DG1:
	 *
	 *	- If the object can be placed in device local-memory, then the
	 *	  pages should be allocated and mapped as write-combined only.
	 *
	 *	- Everything else is always allocated and mapped as write-back,
	 *	  with the guarantee that everything is also coherent with the
	 *	  GPU.
	 *
	 * Internal users of lmem are already expected to get this right, so no
	 * fudging needed there.
	 */
	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
		if (type != I915_MAP_WC && !obj->mm.n_placements) {
			ptr = ERR_PTR(-ENODEV);
			goto err_unpin;
		}

		type = I915_MAP_WC;
	} else if (IS_DGFX(to_i915(obj->base.dev))) {
		type = I915_MAP_WB;
	}

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		err = i915_gem_object_wait_moving_fence(obj, true);
		if (err) {
			ptr = ERR_PTR(err);
			goto err_unpin;
		}

		if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}
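
/*
 * Usage sketch, illustration only (not part of the original file): a CPU
 * write through a pinned kernel map, flushed for coherency and then
 * unpinned. The names example_cpu_fill/data/size are hypothetical. Note
 * the mapping itself stays cached in obj->mm.mapping; the unpin only
 * drops the pin count and the vmap is torn down when the pages go away.
 */
#if 0
static int example_cpu_fill(struct drm_i915_gem_object *obj,
			    const void *data, unsigned long size)
{
	void *vaddr;

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, data, size);
	__i915_gem_object_flush_map(obj, 0, size);
	i915_gem_object_unpin_map(obj);

	return 0;
}
#endif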

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset,
			 bool dma)
{
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	if (!i915_gem_object_has_pinned_pages(obj))
		assert_object_held(obj);

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}
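
/*
 * Usage sketch, illustration only (not part of the original file): the
 * amortisation described above is what makes a simple page-by-page walk
 * of an object cheap, e.g. via i915_gem_object_get_page() below. The
 * first forward pass populates the radix tree; repeated or backwards
 * lookups then hit the tree instead of rescanning the sg list. The
 * helper name is hypothetical and assumes the pages are already pinned.
 */
#if 0
static void example_walk_pages(struct drm_i915_gem_object *obj)
{
	unsigned long i;

	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
		struct page *page = i915_gem_object_get_page(obj, i);

		/* ... operate on each backing page ... */
	}
}
#endif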

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}