i915_scatterlist.c (5321B)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "i915_scatterlist.h"
#include "i915_ttm_buddy_manager.h"

#include <drm/drm_buddy.h>
#include <drm/drm_mm.h>

#include <linux/slab.h>

/**
 * i915_sg_trim - Shrink an sg_table to the number of entries actually used
 * @orig_st: The sg_table to trim.
 *
 * If fewer entries are populated than were allocated, reallocate the table
 * to hold exactly @orig_st->nents entries and copy the pages, dma addresses
 * and lengths across.
 *
 * Return: true if the table was trimmed, false if it was left unchanged.
 */
bool i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	if (orig_st->nents == orig_st->orig_nents)
		return false;

	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
		return false;

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		sg_dma_address(new_sg) = sg_dma_address(sg);
		sg_dma_len(new_sg) = sg_dma_len(sg);

		new_sg = sg_next(new_sg);
	}
	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */

	sg_free_table(orig_st);

	*orig_st = new_st;
	return true;
}

static void i915_refct_sgt_release(struct kref *ref)
{
	struct i915_refct_sgt *rsgt =
		container_of(ref, typeof(*rsgt), kref);

	sg_free_table(&rsgt->table);
	kfree(rsgt);
}

static const struct i915_refct_sgt_ops rsgt_ops = {
	.release = i915_refct_sgt_release
};

/**
 * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: The size of the underlying memory buffer.
 */
void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
{
	__i915_refct_sgt_init(rsgt, size, &rsgt_ops);
}

/**
 * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct
 * drm_mm_node
 * @node: The drm_mm_node.
 * @region_start: An offset to add to the dma addresses of the sg list.
 *
 * Create a struct sg_table, initializing it from a struct drm_mm_node,
 * taking a maximum segment length into account, splitting into segments
 * if necessary.
 *
 * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
 * error code cast to an error pointer on failure.
 */
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start)
{
	const u64 max_segment = SZ_1G; /* Do we have a limit on this? */
	u64 segment_pages = max_segment >> PAGE_SHIFT;
	u64 block_size, offset, prev_end;
	struct i915_refct_sgt *rsgt;
	struct sg_table *st;
	struct scatterlist *sg;

	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
	if (!rsgt)
		return ERR_PTR(-ENOMEM);

	i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
	st = &rsgt->table;
	if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages),
			   GFP_KERNEL)) {
		i915_refct_sgt_put(rsgt);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;
	prev_end = (resource_size_t)-1;
	block_size = node->size << PAGE_SHIFT;
	offset = node->start << PAGE_SHIFT;

	while (block_size) {
		u64 len;

		if (offset != prev_end || sg->length >= max_segment) {
			if (st->nents)
				sg = __sg_next(sg);

			sg_dma_address(sg) = region_start + offset;
			sg_dma_len(sg) = 0;
			sg->length = 0;
			st->nents++;
		}

		len = min(block_size, max_segment - sg->length);
		sg->length += len;
		sg_dma_len(sg) += len;

		offset += len;
		block_size -= len;

		prev_end = offset;
	}

	sg_mark_end(sg);
	i915_sg_trim(st);

	return rsgt;
}

/**
 * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct
 * drm_buddy_block list
 * @res: The struct i915_ttm_buddy_resource.
 * @region_start: An offset to add to the dma addresses of the sg list.
 *
 * Create a struct sg_table, initializing it from a struct drm_buddy_block
 * list, taking a maximum segment length into account, splitting into segments
 * if necessary.
 *
 * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
 * error code cast to an error pointer on failure.
 */
struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	const u64 size = res->num_pages << PAGE_SHIFT;
	const u64 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
	struct drm_buddy *mm = bman_res->mm;
	struct list_head *blocks = &bman_res->blocks;
	struct drm_buddy_block *block;
	struct i915_refct_sgt *rsgt;
	struct scatterlist *sg;
	struct sg_table *st;
	resource_size_t prev_end;

	GEM_BUG_ON(list_empty(blocks));

	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
	if (!rsgt)
		return ERR_PTR(-ENOMEM);

	i915_refct_sgt_init(rsgt, size);
	st = &rsgt->table;
	if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
		i915_refct_sgt_put(rsgt);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;
	prev_end = (resource_size_t)-1;

	list_for_each_entry(block, blocks, link) {
		u64 block_size, offset;

		block_size = min_t(u64, size, drm_buddy_block_size(mm, block));
		offset = drm_buddy_block_offset(block);

		while (block_size) {
			u64 len;

			if (offset != prev_end || sg->length >= max_segment) {
				if (st->nents)
					sg = __sg_next(sg);

				sg_dma_address(sg) = region_start + offset;
				sg_dma_len(sg) = 0;
				sg->length = 0;
				st->nents++;
			}

			len = min(block_size, max_segment - sg->length);
			sg->length += len;
			sg_dma_len(sg) += len;

			offset += len;
			block_size -= len;

			prev_end = offset;
		}
	}

	sg_mark_end(sg);
	i915_sg_trim(st);

	return rsgt;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#endif
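For orientation, here is a minimal caller sketch, not part of this file, showing how the two exported helpers fit together: build a refcounted sg_table from a drm_mm_node with i915_rsgt_from_mm_node(), walk the resulting dma segments, and drop the reference with i915_refct_sgt_put(). The struct my_vram_slice type and its fields are invented purely for illustration; the assumption, implied by the page shifts above, is that node->start and node->size are in pages and region_start is a byte offset added to each dma address.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <drm/drm_mm.h>

#include "i915_scatterlist.h"

/* Hypothetical owner of a drm_mm_node; not an i915 structure. */
struct my_vram_slice {
	struct drm_mm_node node;	/* allocation, start/size in pages */
	u64 iomem_base;			/* byte offset added to each dma address */
};

static int my_vram_slice_to_sgt(struct my_vram_slice *slice)
{
	struct i915_refct_sgt *rsgt;
	struct scatterlist *sg;
	unsigned int i;

	rsgt = i915_rsgt_from_mm_node(&slice->node, slice->iomem_base);
	if (IS_ERR(rsgt))
		return PTR_ERR(rsgt);

	/* Each populated entry carries a coalesced dma address and length. */
	for_each_sg(rsgt->table.sgl, sg, rsgt->table.nents, i)
		pr_debug("segment %u: %pad + %u\n",
			 i, &sg_dma_address(sg), sg_dma_len(sg));

	/* The table is freed once the last reference is dropped. */
	i915_refct_sgt_put(rsgt);
	return 0;
}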