cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

scatterlist.c (9248B)


/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/prime_numbers.h>
#include <linux/random.h>

#include "i915_selftest.h"
#include "i915_utils.h"

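/* Arbitrary non-zero pfn at which the synthetic page ranges below start. */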
#define PFN_BIAS (1 << 10)

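/* A scatterlist table plus the [start, end) pfn range it is expected to cover. */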
struct pfn_table {
        struct sg_table st;
        unsigned long start, end;
};

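/* Returns how many pages the n'th of count scatterlist entries should span. */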
typedef unsigned int (*npages_fn_t)(unsigned long n,
                                    unsigned long count,
                                    struct rnd_state *rnd);

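/*
 * Walk the table with for_each_sg() and check that every entry starts at the
 * expected pfn and has the length chosen by npages_fn().
 */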
static noinline int expect_pfn_sg(struct pfn_table *pt,
                                  npages_fn_t npages_fn,
                                  struct rnd_state *rnd,
                                  const char *who,
                                  unsigned long timeout)
{
        struct scatterlist *sg;
        unsigned long pfn, n;

        pfn = pt->start;
        for_each_sg(pt->st.sgl, sg, pt->st.nents, n) {
                struct page *page = sg_page(sg);
                unsigned int npages = npages_fn(n, pt->st.nents, rnd);

                if (page_to_pfn(page) != pfn) {
                        pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg)\n",
                               __func__, who, pfn, page_to_pfn(page));
                        return -EINVAL;
                }

                if (sg->length != npages * PAGE_SIZE) {
                        pr_err("%s: %s copied wrong sg length, expected size %lu, found %u (using for_each_sg)\n",
                               __func__, who, npages * PAGE_SIZE, sg->length);
                        return -EINVAL;
                }

                if (igt_timeout(timeout, "%s timed out\n", who))
                        return -EINTR;

                pfn += npages;
        }
        if (pfn != pt->end) {
                pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
                       __func__, who, pt->end, pfn);
                return -EINVAL;
        }

        return 0;
}

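/* Repeat the check page by page using the generic for_each_sg_page() iterator. */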
static noinline int expect_pfn_sg_page_iter(struct pfn_table *pt,
                                            const char *who,
                                            unsigned long timeout)
{
        struct sg_page_iter sgiter;
        unsigned long pfn;

        pfn = pt->start;
        for_each_sg_page(pt->st.sgl, &sgiter, pt->st.nents, 0) {
                struct page *page = sg_page_iter_page(&sgiter);

                if (page != pfn_to_page(pfn)) {
                        pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg_page)\n",
                               __func__, who, pfn, page_to_pfn(page));
                        return -EINVAL;
                }

                if (igt_timeout(timeout, "%s timed out\n", who))
                        return -EINTR;

                pfn++;
        }
        if (pfn != pt->end) {
                pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
                       __func__, who, pt->end, pfn);
                return -EINVAL;
        }

        return 0;
}

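/* Repeat the check using the i915-specific for_each_sgt_page() iterator. */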
static noinline int expect_pfn_sgtiter(struct pfn_table *pt,
                                       const char *who,
                                       unsigned long timeout)
{
        struct sgt_iter sgt;
        struct page *page;
        unsigned long pfn;

        pfn = pt->start;
        for_each_sgt_page(page, sgt, &pt->st) {
                if (page != pfn_to_page(pfn)) {
                        pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sgt_page)\n",
                               __func__, who, pfn, page_to_pfn(page));
                        return -EINVAL;
                }

                if (igt_timeout(timeout, "%s timed out\n", who))
                        return -EINTR;

                pfn++;
        }
        if (pfn != pt->end) {
                pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
                       __func__, who, pt->end, pfn);
                return -EINVAL;
        }

        return 0;
}

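/* Verify the same table with all three iteration styles above. */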
static int expect_pfn_sgtable(struct pfn_table *pt,
                              npages_fn_t npages_fn,
                              struct rnd_state *rnd,
                              const char *who,
                              unsigned long timeout)
{
        int err;

        err = expect_pfn_sg(pt, npages_fn, rnd, who, timeout);
        if (err)
                return err;

        err = expect_pfn_sg_page_iter(pt, who, timeout);
        if (err)
                return err;

        err = expect_pfn_sgtiter(pt, who, timeout);
        if (err)
                return err;

        return 0;
}

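/*
 * npages_fn_t implementations: constant, growing, shrinking, bounded-random
 * and huge-page-sized segment lengths.
 */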
static unsigned int one(unsigned long n,
                        unsigned long count,
                        struct rnd_state *rnd)
{
        return 1;
}

static unsigned int grow(unsigned long n,
                         unsigned long count,
                         struct rnd_state *rnd)
{
        return n + 1;
}

static unsigned int shrink(unsigned long n,
                           unsigned long count,
                           struct rnd_state *rnd)
{
        return count - n;
}

static unsigned int random(unsigned long n,
                           unsigned long count,
                           struct rnd_state *rnd)
{
        return 1 + (prandom_u32_state(rnd) % 1024);
}

static unsigned int random_page_size_pages(unsigned long n,
                                           unsigned long count,
                                           struct rnd_state *rnd)
{
        /* 4K, 64K, 2M */
        static unsigned int page_count[] = {
                BIT(12) >> PAGE_SHIFT,
                BIT(16) >> PAGE_SHIFT,
                BIT(21) >> PAGE_SHIFT,
        };

        return page_count[(prandom_u32_state(rnd) % 3)];
}

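/*
 * Check that the struct pages for a pfn range are contiguous in the memmap,
 * so that a single sg entry may legitimately span all of them.
 */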
static inline bool page_contiguous(struct page *first,
                                   struct page *last,
                                   unsigned long npages)
{
        return first + npages == last;
}

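/*
 * Build a table of count entries starting at PFN_BIAS, where the n'th entry
 * spans npages_fn(n, count, rnd) pages.
 */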
static int alloc_table(struct pfn_table *pt,
                       unsigned long count, unsigned long max,
                       npages_fn_t npages_fn,
                       struct rnd_state *rnd,
                       int alloc_error)
{
        struct scatterlist *sg;
        unsigned long n, pfn;

        if (sg_alloc_table(&pt->st, max,
                           GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN))
                return alloc_error;

        /* count must stay below 1 << 20 so count * PAGE_SIZE fits in sg->length */
        GEM_BUG_ON(overflows_type(count * PAGE_SIZE, sg->length));

        /* Construct a table where each scatterlist entry covers a different
         * number of pages. The idea is to check that we can iterate the
         * individual pages from inside the coalesced lists.
         */
        pt->start = PFN_BIAS;
        pfn = pt->start;
        sg = pt->st.sgl;
        for (n = 0; n < count; n++) {
                unsigned long npages = npages_fn(n, count, rnd);

                /* Nobody expects the Sparse Memmap! */
                if (!page_contiguous(pfn_to_page(pfn),
                                     pfn_to_page(pfn + npages),
                                     npages)) {
                        sg_free_table(&pt->st);
                        return -ENOSPC;
                }

                if (n)
                        sg = sg_next(sg);
                sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);

                GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
                GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
                GEM_BUG_ON(sg->offset != 0);

                pfn += npages;
        }
        sg_mark_end(sg);
        pt->st.nents = n;
        pt->end = pfn;

        return 0;
}

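/* NULL-terminated list of segment-size strategies exercised by the tests. */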
static const npages_fn_t npages_funcs[] = {
        one,
        grow,
        shrink,
        random,
        random_page_size_pages,
        NULL,
};

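/*
 * Build and verify tables of 2^prime - 1, 2^prime and 2^prime + 1 entries
 * for each prime below 20 and each segment-size strategy.
 */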
static int igt_sg_alloc(void *ignored)
{
        IGT_TIMEOUT(end_time);
        const unsigned long max_order = 20; /* approximating a 4GiB object */
        struct rnd_state prng;
        unsigned long prime;
        int alloc_error = -ENOMEM;

        for_each_prime_number(prime, max_order) {
                unsigned long size = BIT(prime);
                int offset;

                for (offset = -1; offset <= 1; offset++) {
                        unsigned long sz = size + offset;
                        const npages_fn_t *npages;
                        struct pfn_table pt;
                        int err;

                        for (npages = npages_funcs; *npages; npages++) {
                                prandom_seed_state(&prng,
                                                   i915_selftest.random_seed);
                                err = alloc_table(&pt, sz, sz, *npages, &prng,
                                                  alloc_error);
                                if (err == -ENOSPC)
                                        break;
                                if (err)
                                        return err;

                                prandom_seed_state(&prng,
                                                   i915_selftest.random_seed);
                                err = expect_pfn_sgtable(&pt, *npages, &prng,
                                                         "sg_alloc_table",
                                                         end_time);
                                sg_free_table(&pt.st);
                                if (err)
                                        return err;
                        }
                }

                /* Test at least one continuation before accepting oom */
                if (size > SG_MAX_SINGLE_ALLOC)
                        alloc_error = -ENOSPC;
        }

        return 0;
}

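/*
 * As above, but over-allocate the table (max entries, only prime used) and
 * check that i915_sg_trim() shrinks it back to exactly the entries in use.
 */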
static int igt_sg_trim(void *ignored)
{
        IGT_TIMEOUT(end_time);
        const unsigned long max = PAGE_SIZE; /* not prime! */
        struct pfn_table pt;
        unsigned long prime;
        int alloc_error = -ENOMEM;

        for_each_prime_number(prime, max) {
                const npages_fn_t *npages;
                int err;

                for (npages = npages_funcs; *npages; npages++) {
                        struct rnd_state prng;

                        prandom_seed_state(&prng, i915_selftest.random_seed);
                        err = alloc_table(&pt, prime, max, *npages, &prng,
                                          alloc_error);
                        if (err == -ENOSPC)
                                break;
                        if (err)
                                return err;

                        if (i915_sg_trim(&pt.st)) {
                                if (pt.st.orig_nents != prime ||
                                    pt.st.nents != prime) {
                                        pr_err("i915_sg_trim failed (nents %u, orig_nents %u), expected %lu\n",
                                               pt.st.nents, pt.st.orig_nents, prime);
                                        err = -EINVAL;
                                } else {
                                        prandom_seed_state(&prng,
                                                           i915_selftest.random_seed);
                                        err = expect_pfn_sgtable(&pt,
                                                                 *npages, &prng,
                                                                 "i915_sg_trim",
                                                                 end_time);
                                }
                        }
                        sg_free_table(&pt.st);
                        if (err)
                                return err;
                }

                /* Test at least one continuation before accepting oom */
                if (prime > SG_MAX_SINGLE_ALLOC)
                        alloc_error = -ENOSPC;
        }

        return 0;
}

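/* Entry point: the scatterlist subtests run by the i915 mock selftest framework. */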
int scatterlist_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_sg_alloc),
                SUBTEST(igt_sg_trim),
        };

        return i915_subtests(tests, NULL);
}