cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mteswap.c (2217B)


// SPDX-License-Identifier: GPL-2.0-only

#include <linux/pagemap.h>
#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/mte.h>

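/* Saved tag storage for swapped-out tagged pages, indexed by the swap entry value. */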
static DEFINE_XARRAY(mte_pages);

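/*
 * Worked example, assuming 4K pages: one 4-bit tag per 16-byte granule,
 * packed two per byte, gives (4096 / 16) / 2 = 128 bytes of tag storage
 * per page.
 */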
void *mte_allocate_tag_storage(void)
{
	/* tags granule is 16 bytes, 2 tags stored per byte */
	return kmalloc(MTE_PAGE_TAG_STORAGE, GFP_KERNEL);
}

void mte_free_tag_storage(char *storage)
{
	kfree(storage);
}

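/*
 * Snapshot the tags of a tagged page into an mte_pages entry keyed by the
 * page's swap entry, so they survive while the page data sits in swap.
 */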
int mte_save_tags(struct page *page)
{
	void *tag_storage, *ret;

	if (!test_bit(PG_mte_tagged, &page->flags))
		return 0;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	/* page_private contains the swap entry.val set in do_swap_page */
	ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (ret) {
		/* Entry is being replaced, free the old entry */
		mte_free_tag_storage(ret);
	}

	return 0;
}

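/*
 * Copy the saved tags back into a page that is being brought in from swap.
 * Returns false if no tags were saved for this swap entry.
 */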
bool mte_restore_tags(swp_entry_t entry, struct page *page)
{
	void *tags = xa_load(&mte_pages, entry.val);

	if (!tags)
		return false;

	page_kasan_tag_reset(page);
	/*
	 * We need smp_wmb() in between setting the flags and clearing the
	 * tags because if another thread reads page->flags and builds a
	 * tagged address out of it, there is an actual dependency to the
	 * memory access, but on the current thread we do not guarantee that
	 * the new page->flags are visible before the tags were updated.
	 */
	smp_wmb();
	mte_restore_page_tags(page_address(page), tags);

	return true;
}

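/* Free the saved tags for a single swap slot. */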
void mte_invalidate_tags(int type, pgoff_t offset)
{
	swp_entry_t entry = swp_entry(type, offset);
	void *tags = xa_erase(&mte_pages, entry.val);

	mte_free_tag_storage(tags);
}

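/* Free the saved tags for every slot of a whole swap area. */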
void mte_invalidate_tags_area(int type)
{
	swp_entry_t entry = swp_entry(type, 0);
	swp_entry_t last_entry = swp_entry(type + 1, 0);
	void *tags;

	XA_STATE(xa_state, &mte_pages, entry.val);

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, last_entry.val - 1) {
		__xa_erase(&mte_pages, xa_state.xa_index);
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);
}
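
None of these helpers are called directly by generic mm code. In mainline arm64 they are reached through the swap hooks in arch/arm64/include/asm/pgtable.h; the sketch below mirrors that wiring and is only an illustration of how this tree is expected to use mteswap.c (the exact hook names and signatures can differ between kernel versions and may differ in this fork).

#ifdef CONFIG_ARM64_MTE

#define __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct page *page)
{
	/* Swap-out: save the page's tags before its data goes to disk. */
	if (system_supports_mte())
		return mte_save_tags(page);
	return 0;
}

#define __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
	/* A single swap slot was freed: drop its saved tags. */
	if (system_supports_mte())
		mte_invalidate_tags(type, offset);
}

static inline void arch_swap_invalidate_area(int type)
{
	/* An entire swap area went away: drop all of its saved tags. */
	if (system_supports_mte())
		mte_invalidate_tags_area(type);
}

#endif /* CONFIG_ARM64_MTE */

Restoring the tags on swap-in happens when the new PTE is installed: depending on the kernel version, mte_restore_tags() is reached either via mte_sync_tags() on the set_pte_at() path or via an arch_swap_restore() hook.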