cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

iomap.c (10315B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
      4 */
      5#include <linux/memremap.h>
      6#include <linux/rculist.h>
      7#include <linux/export.h>
      8#include <linux/ioport.h>
      9#include <linux/module.h>
     10#include <linux/types.h>
     11#include <linux/pfn_t.h>
     12#include <linux/acpi.h>
     13#include <linux/io.h>
     14#include <linux/mm.h>
     15#include "nfit_test.h"
     16
/* Head of the (at most one entry) list of registered wrapper ops. */
static LIST_HEAD(iomap_head);

/*
 * Hooks registered by the nfit_test module so the wrapped kernel
 * interfaces below can divert address lookups and ACPI _DSM evaluation
 * into the test harness.  Published/retired on iomap_head via RCU.
 */
static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;	/* addr -> test resource, or NULL */
	nfit_test_evaluate_dsm_fn evaluate_dsm;	/* _DSM interception hook */
	struct list_head list;			/* membership on iomap_head */
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};
     26
/*
 * nfit_test_setup - register the test-harness callbacks.
 * @lookup: resolves a physical address to an nfit_test_resource (or NULL)
 * @evaluate: intercepts acpi_evaluate_dsm() calls
 *
 * Both callbacks are stored before the ops are published onto
 * iomap_head with list_add_rcu(), so RCU readers never observe a
 * partially-initialized iomap_ops.
 */
void nfit_test_setup(nfit_test_lookup_fn lookup,
		nfit_test_evaluate_dsm_fn evaluate)
{
	iomap_ops.nfit_test_lookup = lookup;
	iomap_ops.evaluate_dsm = evaluate;
	list_add_rcu(&iomap_ops.list, &iomap_head);
}
EXPORT_SYMBOL(nfit_test_setup);
     35
/*
 * nfit_test_teardown - unregister the test-harness callbacks.
 *
 * synchronize_rcu() guarantees no reader still dereferences iomap_ops
 * once this returns, making it safe for the registrant to go away.
 */
void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);
     42
     43static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
     44{
     45	struct iomap_ops *ops;
     46
     47	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
     48	if (ops)
     49		return ops->nfit_test_lookup(resource);
     50	return NULL;
     51}
     52
     53struct nfit_test_resource *get_nfit_res(resource_size_t resource)
     54{
     55	struct nfit_test_resource *res;
     56
     57	rcu_read_lock();
     58	res = __get_nfit_res(resource);
     59	rcu_read_unlock();
     60
     61	return res;
     62}
     63EXPORT_SYMBOL(get_nfit_res);
     64
/*
 * Map @offset either into a simulated resource's backing buffer (when
 * the test harness owns the address) or via @fallback_fn, the real
 * ioremap variant.  Statement-expression so it can be used as the body
 * of the various __wrap_ioremap*() functions below.
 */
#define __nfit_test_ioremap(offset, size, fallback_fn) ({		\
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);	\
	nfit_res ?							\
		(void __iomem *) nfit_res->buf + (offset)		\
			- nfit_res->res.start				\
	:								\
		fallback_fn((offset), (size)) ;				\
})
     73
     74void __iomem *__wrap_devm_ioremap(struct device *dev,
     75		resource_size_t offset, unsigned long size)
     76{
     77	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
     78
     79	if (nfit_res)
     80		return (void __iomem *) nfit_res->buf + offset
     81			- nfit_res->res.start;
     82	return devm_ioremap(dev, offset, size);
     83}
     84EXPORT_SYMBOL(__wrap_devm_ioremap);
     85
     86void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
     87		size_t size, unsigned long flags)
     88{
     89	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
     90
     91	if (nfit_res)
     92		return nfit_res->buf + offset - nfit_res->res.start;
     93	return devm_memremap(dev, offset, size, flags);
     94}
     95EXPORT_SYMBOL(__wrap_devm_memremap);
     96
/*
 * devm action paired with __wrap_devm_memremap_pages(): kill the
 * percpu ref, wait for dev_pagemap_percpu_release() to signal that the
 * last reference dropped, then free the ref.  Ordering matters.
 */
static void nfit_test_kill(void *_pgmap)
{
	struct dev_pagemap *pgmap = _pgmap;

	WARN_ON(!pgmap);

	percpu_ref_kill(&pgmap->ref);

	wait_for_completion(&pgmap->done);
	percpu_ref_exit(&pgmap->ref);
}
    108
/*
 * percpu_ref release callback: wakes the waiter in nfit_test_kill()
 * once the last reference to the pagemap is gone.
 */
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap = container_of(ref, struct dev_pagemap, ref);

	complete(&pgmap->done);
}
    115
/*
 * Wrapped devm_memremap_pages(): for simulated resources, stand up only
 * the percpu-ref/completion machinery (torn down via the nfit_test_kill
 * devm action) and return the test buffer directly -- no struct pages
 * are created.  Real resources go to devm_memremap_pages().
 */
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	resource_size_t offset = pgmap->range.start;
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (!nfit_res)
		return devm_memremap_pages(dev, pgmap);

	/* completion must be ready before the ref can be killed/waited on */
	init_completion(&pgmap->done);
	error = percpu_ref_init(&pgmap->ref, dev_pagemap_percpu_release, 0,
				GFP_KERNEL);
	if (error)
		return ERR_PTR(error);

	error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
	if (error)
		return ERR_PTR(error);
	return nfit_res->buf + offset - nfit_res->res.start;
}
EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
    137
    138pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
    139{
    140	struct nfit_test_resource *nfit_res = get_nfit_res(addr);
    141
    142	if (nfit_res)
    143		flags &= ~PFN_MAP;
    144        return phys_to_pfn_t(addr, flags);
    145}
    146EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
    147
    148void *__wrap_memremap(resource_size_t offset, size_t size,
    149		unsigned long flags)
    150{
    151	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
    152
    153	if (nfit_res)
    154		return nfit_res->buf + offset - nfit_res->res.start;
    155	return memremap(offset, size, flags);
    156}
    157EXPORT_SYMBOL(__wrap_memremap);
    158
    159void __wrap_devm_memunmap(struct device *dev, void *addr)
    160{
    161	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
    162
    163	if (nfit_res)
    164		return;
    165	return devm_memunmap(dev, addr);
    166}
    167EXPORT_SYMBOL(__wrap_devm_memunmap);
    168
/* Wrapped ioremap(): divert to the test buffer via __nfit_test_ioremap(). */
void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap);
}
EXPORT_SYMBOL(__wrap_ioremap);
    174
/* Wrapped ioremap_wc(): divert to the test buffer via __nfit_test_ioremap(). */
void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);
    180
    181void __wrap_iounmap(volatile void __iomem *addr)
    182{
    183	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
    184	if (nfit_res)
    185		return;
    186	return iounmap(addr);
    187}
    188EXPORT_SYMBOL(__wrap_iounmap);
    189
    190void __wrap_memunmap(void *addr)
    191{
    192	struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
    193
    194	if (nfit_res)
    195		return;
    196	return memunmap(addr);
    197}
    198EXPORT_SYMBOL(__wrap_memunmap);
    199
/* Forward declaration: nfit_devres_release() below needs it. */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n);
    203
/*
 * devres destructor: release the simulated region recorded by
 * nfit_test_request_region() when the owning device is torn down.
 * @data holds a struct resource * (see devres_alloc() in
 * nfit_test_request_region()).
 */
static void nfit_devres_release(struct device *dev, void *data)
{
	struct resource *res = *((struct resource **) data);

	WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
			resource_size(res)));
}
    211
    212static int match(struct device *dev, void *__res, void *match_data)
    213{
    214	struct resource *res = *((struct resource **) __res);
    215	resource_size_t start = *((resource_size_t *) match_data);
    216
    217	return res->start == start;
    218}
    219
/*
 * Release a region previously handed out by nfit_test_request_region().
 *
 * Returns true when @start belongs to a simulated resource (the request
 * bookkeeping is torn down here), false when the caller must fall back
 * to the real __release_region() path.
 */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			if (dev) {
				/* device-managed path: let devres invoke
				 * nfit_devres_release() for the match */
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			/* releasing an unknown region, or with the wrong
			 * size, indicates a caller bug */
			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
						__func__, start, n, res);
			if (res)
				kfree(req);
			return true;
		}
	}
	return false;
}
    256
    257static struct resource *nfit_test_request_region(struct device *dev,
    258		struct resource *parent, resource_size_t start,
    259		resource_size_t n, const char *name, int flags)
    260{
    261	struct nfit_test_resource *nfit_res;
    262
    263	if (parent == &iomem_resource) {
    264		nfit_res = get_nfit_res(start);
    265		if (nfit_res) {
    266			struct nfit_test_request *req;
    267			struct resource *res = NULL;
    268
    269			if (start + n > nfit_res->res.start
    270					+ resource_size(&nfit_res->res)) {
    271				pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
    272						__func__, start, n,
    273						&nfit_res->res);
    274				return NULL;
    275			}
    276
    277			spin_lock(&nfit_res->lock);
    278			list_for_each_entry(req, &nfit_res->requests, list)
    279				if (start == req->res.start) {
    280					res = &req->res;
    281					break;
    282				}
    283			spin_unlock(&nfit_res->lock);
    284
    285			if (res) {
    286				WARN(1, "%pr already busy\n", res);
    287				return NULL;
    288			}
    289
    290			req = kzalloc(sizeof(*req), GFP_KERNEL);
    291			if (!req)
    292				return NULL;
    293			INIT_LIST_HEAD(&req->list);
    294			res = &req->res;
    295
    296			res->start = start;
    297			res->end = start + n - 1;
    298			res->name = name;
    299			res->flags = resource_type(parent);
    300			res->flags |= IORESOURCE_BUSY | flags;
    301			spin_lock(&nfit_res->lock);
    302			list_add(&req->list, &nfit_res->requests);
    303			spin_unlock(&nfit_res->lock);
    304
    305			if (dev) {
    306				struct resource **d;
    307
    308				d = devres_alloc(nfit_devres_release,
    309						sizeof(struct resource *),
    310						GFP_KERNEL);
    311				if (!d)
    312					return NULL;
    313				*d = res;
    314				devres_add(dev, d);
    315			}
    316
    317			pr_debug("%s: %pr\n", __func__, res);
    318			return res;
    319		}
    320	}
    321	if (dev)
    322		return __devm_request_region(dev, parent, start, n, name);
    323	return __request_region(parent, start, n, name, flags);
    324}
    325
/* Wrapped __request_region(): non-device-managed entry point. */
struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);
    333
    334int __wrap_insert_resource(struct resource *parent, struct resource *res)
    335{
    336	if (get_nfit_res(res->start))
    337		return 0;
    338	return insert_resource(parent, res);
    339}
    340EXPORT_SYMBOL(__wrap_insert_resource);
    341
    342int __wrap_remove_resource(struct resource *res)
    343{
    344	if (get_nfit_res(res->start))
    345		return 0;
    346	return remove_resource(res);
    347}
    348EXPORT_SYMBOL(__wrap_remove_resource);
    349
    350struct resource *__wrap___devm_request_region(struct device *dev,
    351		struct resource *parent, resource_size_t start,
    352		resource_size_t n, const char *name)
    353{
    354	if (!dev)
    355		return NULL;
    356	return nfit_test_request_region(dev, parent, start, n, name, 0);
    357}
    358EXPORT_SYMBOL(__wrap___devm_request_region);
    359
    360void __wrap___release_region(struct resource *parent, resource_size_t start,
    361		resource_size_t n)
    362{
    363	if (!nfit_test_release_region(NULL, parent, start, n))
    364		__release_region(parent, start, n);
    365}
    366EXPORT_SYMBOL(__wrap___release_region);
    367
    368void __wrap___devm_release_region(struct device *dev, struct resource *parent,
    369		resource_size_t start, resource_size_t n)
    370{
    371	if (!nfit_test_release_region(dev, parent, start, n))
    372		__devm_release_region(dev, parent, start, n);
    373}
    374EXPORT_SYMBOL(__wrap___devm_release_region);
    375
/*
 * Wrapped acpi_evaluate_object(): intercept _FIT evaluation on handles
 * the test harness owns and return the canned acpi_object stored in the
 * resource buffer; everything else goes to real ACPI.
 *
 * NOTE(review): buf->pointer is handed out without a copy, so the test
 * object's lifetime must outlive the caller's use -- presumably
 * guaranteed by the harness; verify against nfit_test.
 */
acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
		struct acpi_object_list *p, struct acpi_buffer *buf)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
	union acpi_object **obj;

	if (!nfit_res || strcmp(path, "_FIT") || !buf)
		return acpi_evaluate_object(handle, path, p, buf);

	obj = nfit_res->buf;
	buf->length = sizeof(union acpi_object);
	buf->pointer = *obj;
	return AE_OK;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
    391
/*
 * Wrapped acpi_evaluate_dsm(): give the registered evaluate_dsm hook
 * first crack at the call (under rcu_read_lock, matching the
 * list_add_rcu publication in nfit_test_setup()).  An ERR_PTR result --
 * including the -ENXIO default when no ops are registered -- falls back
 * to real ACPI.
 */
union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
		u64 rev, u64 func, union acpi_object *argv4)
{
	union acpi_object *obj = ERR_PTR(-ENXIO);
	struct iomap_ops *ops;

	rcu_read_lock();
	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
	rcu_read_unlock();

	if (IS_ERR(obj))
		return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
	return obj;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
    409
/* Must match the SPDX identifier at the top of the file. */
MODULE_LICENSE("GPL v2");