cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

paddr.c (6097B)


// SPDX-License-Identifier: GPL-2.0
/*
 * DAMON Primitives for The Physical Address Space
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#define pr_fmt(fmt) "damon-pa: " fmt

#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>

#include "../internal.h"
#include "ops-common.h"

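/*
 * rmap_walk() callback: clear the accessed (young) bit of every page table
 * entry that maps the folio, so a later check can tell whether the folio was
 * accessed again in the meantime.
 */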
static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte)
			damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr);
		else
			damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr);
	}
	return true;
}

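/*
 * Mark the page at physical address @paddr as "old": look the page up and, if
 * it is mapped, walk its reverse mappings and clear the accessed bits of all
 * mappings.  An unmapped folio is simply flagged idle.
 */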
static void damon_pa_mkold(unsigned long paddr)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct rmap_walk_control rwc = {
		.rmap_one = __damon_pa_mkold,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		folio_set_idle(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		goto out;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);

out:
	folio_put(folio);
}

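/*
 * Prepare the access check for a single region: pick a random sampling
 * address inside the region and mark the page behind it as old.
 */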
static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
					    struct damon_region *r)
{
	r->sampling_addr = damon_rand(r->ar.start, r->ar.end);

	damon_pa_mkold(r->sampling_addr);
}

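/* Prepare access checks for every region of every monitoring target. */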
static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t)
			__damon_pa_prepare_access_check(ctx, r);
	}
}

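/* Result of an access check, passed to the rmap walk callback via rwc.arg. */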
struct damon_pa_access_chk_result {
	unsigned long page_sz;
	bool accessed;
};

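/*
 * rmap_walk() callback: report the folio as accessed if any mapping's young
 * bit is set, the folio has lost its idle flag, or an MMU notifier reports it
 * young; stop the reverse-map walk at the first hit.
 */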
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	struct damon_pa_access_chk_result *result = arg;
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

	result->accessed = false;
	result->page_sz = PAGE_SIZE;
	while (page_vma_mapped_walk(&pvmw)) {
		addr = pvmw.address;
		if (pvmw.pte) {
			result->accessed = pte_young(*pvmw.pte) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			result->accessed = pmd_young(*pvmw.pmd) ||
				!folio_test_idle(folio) ||
				mmu_notifier_test_young(vma->vm_mm, addr);
			result->page_sz = HPAGE_PMD_SIZE;
#else
			WARN_ON_ONCE(1);
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
		}
		if (result->accessed) {
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	}

	/* If accessed, stop walking */
	return !result->accessed;
}

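/*
 * Check whether the page at physical address @paddr has been accessed since
 * the last damon_pa_mkold() on it, and report the size of the mapping that
 * was checked through @page_sz.
 */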
static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
{
	struct folio *folio;
	struct page *page = damon_get_page(PHYS_PFN(paddr));
	struct damon_pa_access_chk_result result = {
		.page_sz = PAGE_SIZE,
		.accessed = false,
	};
	struct rmap_walk_control rwc = {
		.arg = &result,
		.rmap_one = __damon_pa_young,
		.anon_lock = folio_lock_anon_vma_read,
	};
	bool need_lock;

	if (!page)
		return false;
	folio = page_folio(page);

	if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
		if (folio_test_idle(folio))
			result.accessed = false;
		else
			result.accessed = true;
		folio_put(folio);
		goto out;
	}

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio)) {
		folio_put(folio);
		return false;
	}

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
	folio_put(folio);

out:
	*page_sz = result.page_sz;
	return result.accessed;
}

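/*
 * Check the access to @r->sampling_addr and update @r->nr_accesses.  The
 * result of the last check is cached, so regions whose sampling addresses
 * fall into the same page do not trigger another reverse-map walk.
 */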
static void __damon_pa_check_access(struct damon_ctx *ctx,
				    struct damon_region *r)
{
	static unsigned long last_addr;
	static unsigned long last_page_sz = PAGE_SIZE;
	static bool last_accessed;

	/* If the region is in the last checked page, reuse the result */
	if (ALIGN_DOWN(last_addr, last_page_sz) ==
				ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
		if (last_accessed)
			r->nr_accesses++;
		return;
	}

	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
	if (last_accessed)
		r->nr_accesses++;

	last_addr = r->sampling_addr;
}

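/*
 * Check the accesses of all regions of all targets and return the largest
 * nr_accesses value that was observed.
 */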
static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned int max_nr_accesses = 0;

	damon_for_each_target(t, ctx) {
		damon_for_each_region(r, t) {
			__damon_pa_check_access(ctx, r);
			max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
		}
	}

	return max_nr_accesses;
}

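/*
 * Apply a DAMOS scheme to a region.  Only DAMOS_PAGEOUT is supported here:
 * each page of the region is isolated from its LRU list and reclaimed, and
 * the number of bytes that were paged out is returned.
 */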
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	unsigned long addr, applied;
	LIST_HEAD(page_list);

	if (scheme->action != DAMOS_PAGEOUT)
		return 0;

	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
		struct page *page = damon_get_page(PHYS_PFN(addr));

		if (!page)
			continue;

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (isolate_lru_page(page)) {
			put_page(page);
			continue;
		}
		if (PageUnevictable(page)) {
			putback_lru_page(page);
		} else {
			list_add(&page->lru, &page_list);
			put_page(page);
		}
	}
	applied = reclaim_pages(&page_list);
	cond_resched();
	return applied * PAGE_SIZE;
}

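/*
 * Compute the prioritization score of a region under a scheme: the pageout
 * score for DAMOS_PAGEOUT, the maximum score for everything else.
 */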
static int damon_pa_scheme_score(struct damon_ctx *context,
		struct damon_target *t, struct damon_region *r,
		struct damos *scheme)
{
	switch (scheme->action) {
	case DAMOS_PAGEOUT:
		return damon_pageout_score(context, r, scheme);
	default:
		break;
	}

	return DAMOS_MAX_SCORE;
}

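/*
 * Register the physical address space monitoring operations with DAMON.
 * Callbacks that this address space does not need (init, update,
 * reset_aggregated, target_valid, cleanup) are left NULL.
 */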
static int __init damon_pa_initcall(void)
{
	struct damon_operations ops = {
		.id = DAMON_OPS_PADDR,
		.init = NULL,
		.update = NULL,
		.prepare_access_checks = damon_pa_prepare_access_checks,
		.check_accesses = damon_pa_check_accesses,
		.reset_aggregated = NULL,
		.target_valid = NULL,
		.cleanup = NULL,
		.apply_scheme = damon_pa_apply_scheme,
		.get_scheme_score = damon_pa_scheme_score,
	};

	return damon_register_ops(&ops);
}

subsys_initcall(damon_pa_initcall);