cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_gt_buffer_pool.c (5662B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"

static struct list_head *
bucket_for_size(struct intel_gt_buffer_pool *pool, size_t sz)
{
	int n;

	/*
	 * Compute a power-of-two bucket, but throw everything greater than
	 * 16KiB into the same bucket: i.e. the buckets hold objects of
	 * (1 page, 2 pages, 4 pages, 8+ pages).
	 */
	n = fls(sz >> PAGE_SHIFT) - 1;
	if (n >= ARRAY_SIZE(pool->cache_list))
		n = ARRAY_SIZE(pool->cache_list) - 1;

	return &pool->cache_list[n];
}

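/*
 * Drop the node's backing object reference, tear down its active tracker,
 * and free the node itself only after an RCU grace period, since lookups
 * walk the bucket lists under rcu_read_lock().
 */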
static void node_free(struct intel_gt_buffer_pool_node *node)
{
	i915_gem_object_put(node->obj);
	i915_active_fini(&node->active);
	kfree_rcu(node, rcu);
}

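/*
 * Walk every bucket and reap nodes that have sat idle for longer than
 * @keep jiffies, claiming each with xchg() on node->age so we never race
 * a concurrent lookup. Returns true while any bucket still holds nodes,
 * so the caller knows to keep the reaper scheduled.
 */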
static bool pool_free_older_than(struct intel_gt_buffer_pool *pool, long keep)
{
	struct intel_gt_buffer_pool_node *node, *stale = NULL;
	bool active = false;
	int n;

	/* Free buffers that have not been used in the past second */
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
		struct list_head *list = &pool->cache_list[n];

		if (list_empty(list))
			continue;

		if (spin_trylock_irq(&pool->lock)) {
			struct list_head *pos;

			/* Most recent at head; oldest at tail */
			list_for_each_prev(pos, list) {
				unsigned long age;

				node = list_entry(pos, typeof(*node), link);

				age = READ_ONCE(node->age);
				if (!age || jiffies - age < keep)
					break;

				/* Check we are the first to claim this node */
				if (!xchg(&node->age, 0))
					break;

				node->free = stale;
				stale = node;
			}
			if (!list_is_last(pos, list))
				__list_del_many(pos, list);

			spin_unlock_irq(&pool->lock);
		}

		active |= !list_empty(list);
	}

	while ((node = stale)) {
		stale = stale->free;
		node_free(node);
	}

	return active;
}

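/*
 * Delayed-work callback: trim anything idle for at least a second, and
 * re-arm ourselves for another pass while the buckets remain populated.
 */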
static void pool_free_work(struct work_struct *wrk)
{
	struct intel_gt_buffer_pool *pool =
		container_of(wrk, typeof(*pool), work.work);

	if (pool_free_older_than(pool, HZ))
		schedule_delayed_work(&pool->work,
				      round_jiffies_up_relative(HZ));
}

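/*
 * i915_active retirement callback: the GPU is done with the buffer, so
 * unpin it, make it purgeable by the shrinker again, and return the node
 * to its size bucket with a fresh timestamp for the reaper.
 */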
static void pool_retire(struct i915_active *ref)
{
	struct intel_gt_buffer_pool_node *node =
		container_of(ref, typeof(*node), active);
	struct intel_gt_buffer_pool *pool = node->pool;
	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
	unsigned long flags;

	if (node->pinned) {
		i915_gem_object_unpin_pages(node->obj);

		/* Return this object to the shrinker pool */
		i915_gem_object_make_purgeable(node->obj);
		node->pinned = false;
	}

	GEM_BUG_ON(node->age);
	spin_lock_irqsave(&pool->lock, flags);
	list_add_rcu(&node->link, list);
	WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
	spin_unlock_irqrestore(&pool->lock, flags);

	schedule_delayed_work(&pool->work,
			      round_jiffies_up_relative(HZ));
}

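/*
 * Pin the node's backing pages and hide the object from the shrinker for
 * as long as the node is in use; pool_retire() undoes both.
 */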
void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
{
	assert_object_held(node->obj);

	if (node->pinned)
		return;

	__i915_gem_object_pin_pages(node->obj);
	/* Hide this pinned object from the shrinker until retired */
	i915_gem_object_make_unshrinkable(node->obj);
	node->pinned = true;
}

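/*
 * Allocate a new pool node backed by a freshly created internal object of
 * the requested size and map type.
 */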
static struct intel_gt_buffer_pool_node *
node_create(struct intel_gt_buffer_pool *pool, size_t sz,
	    enum i915_map_type type)
{
	struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
	struct intel_gt_buffer_pool_node *node;
	struct drm_i915_gem_object *obj;

	node = kmalloc(sizeof(*node),
		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	node->age = 0;
	node->pool = pool;
	node->pinned = false;
	i915_active_init(&node->active, NULL, pool_retire, 0);

	obj = i915_gem_object_create_internal(gt->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

	i915_gem_object_set_readonly(obj);

	node->type = type;
	node->obj = obj;
	return node;
}

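/*
 * Fetch a cached node of at least @size bytes and a matching map type, or
 * create one if the bucket has nothing suitable. A cached node is claimed
 * by swapping its age to 0 with cmpxchg(); losing that race just means we
 * move on to the next entry. The node is returned with its active
 * reference already acquired.
 */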
struct intel_gt_buffer_pool_node *
intel_gt_get_buffer_pool(struct intel_gt *gt, size_t size,
			 enum i915_map_type type)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	struct intel_gt_buffer_pool_node *node;
	struct list_head *list;
	int ret;

	size = PAGE_ALIGN(size);
	list = bucket_for_size(pool, size);

	rcu_read_lock();
	list_for_each_entry_rcu(node, list, link) {
		unsigned long age;

		if (node->obj->base.size < size)
			continue;

		if (node->type != type)
			continue;

		age = READ_ONCE(node->age);
		if (!age)
			continue;

		if (cmpxchg(&node->age, age, 0) == age) {
			spin_lock_irq(&pool->lock);
			list_del_rcu(&node->link);
			spin_unlock_irq(&pool->lock);
			break;
		}
	}
	rcu_read_unlock();

	if (&node->link == list) {
		node = node_create(pool, size, type);
		if (IS_ERR(node))
			return node;
	}

	ret = i915_active_acquire(&node->active);
	if (ret) {
		node_free(node);
		return ERR_PTR(ret);
	}

	return node;
}

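/* One-time setup of the per-gt pool: lock, empty buckets, reaper work. */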
void intel_gt_init_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	spin_lock_init(&pool->lock);
	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		INIT_LIST_HEAD(&pool->cache_list[n]);
	INIT_DELAYED_WORK(&pool->work, pool_free_work);
}

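/*
 * Drain the pool: repeatedly free every idle node regardless of age, and
 * keep going until cancelling the reaper confirms no work was pending.
 */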
void intel_gt_flush_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;

	do {
		while (pool_free_older_than(pool, 0))
			;
	} while (cancel_delayed_work_sync(&pool->work));
}

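/* Teardown sanity check: every bucket must be empty by now. */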
void intel_gt_fini_buffer_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
	int n;

	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}
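
/*
 * Usage sketch (not part of this file): a simplified caller, using the
 * companion header's intel_gt_buffer_pool_put() wrapper around
 * i915_active_release(). Real callers also tie the node's i915_active to
 * a request fence, and must hold the object's ww lock around mark_used
 * (see the assert_object_held() above); error handling is elided here.
 */
#if 0
static int example_use_pool(struct intel_gt *gt)
{
	struct intel_gt_buffer_pool_node *node;

	node = intel_gt_get_buffer_pool(gt, SZ_4K, I915_MAP_WC);
	if (IS_ERR(node))
		return PTR_ERR(node);

	/* ... i915_gem_object_lock(node->obj, &ww), then: */
	intel_gt_buffer_pool_mark_used(node);

	/* ... populate and submit node->obj ... */

	intel_gt_buffer_pool_put(node); /* drop the active reference */
	return 0;
}
#endif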