cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgtable-frag.c (2626B)


// SPDX-License-Identifier: GPL-2.0

/*
 *  Handling Page Tables through page fragments
 *
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

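/*
 * Drop the references held by the per-mm fragment cache at teardown. The
 * low bits of the cached pointer encode how many fragments of the page
 * have already been handed out; the remaining PTE_FRAG_NR - count
 * references belong to the cache itself and are released here, freeing
 * the page once no handed-out fragment is still live.
 */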
void pte_frag_destroy(void *pte_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_pte_page_dtor(page);
		__free_page(page);
	}
}

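/*
 * Hand out the next fragment from the page cached in mm->context, if any.
 * The cached pointer is advanced by PTE_FRAG_SIZE and cleared once it
 * wraps to the next page boundary, i.e. when the last fragment is taken.
 */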
static pte_t *get_pte_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	if (PTE_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = pte_frag_get(&mm->context);
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		pte_frag_set(&mm->context, pte_frag);
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

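/*
 * Slow path: allocate a fresh page, take its first fragment for the
 * caller and, unless another thread raced us, cache the remainder in
 * mm->context with the refcount raised to PTE_FRAG_NR.
 */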
static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_pte_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PTE_FRAG_NR == 1)
		return ret;
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pte_frag already set, another thread cached a page
	 * first; return our allocated page with a single fragment count.
	 */
	if (likely(!pte_frag_get(&mm->context))) {
		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

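/*
 * Allocate one PTE-sized fragment, preferring the per-mm cache and
 * falling back to a new page. @kernel selects kernel page tables, which
 * skip the pgtable_pte_page_ctor()/dtor() accounting.
 */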
pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)
{
	pte_t *pte;

	pte = get_pte_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_ptecache(mm, kernel);
}

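/*
 * Release one fragment. The backing page is freed only when the last
 * fragment reference is dropped; pages marked reserved are released via
 * free_reserved_page() instead.
 */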
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (PageReserved(page))
		return free_reserved_page(page);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		if (!kernel)
			pgtable_pte_page_dtor(page);
		__free_page(page);
	}
}
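
For context, here is a minimal sketch of how an architecture's pgalloc glue would typically route its page-table hooks through the fragment allocator above. It is loosely modeled on powerpc's asm/pgalloc.h; the wrapper signatures, the pgtable_t cast, and the assumption that pgtable_t is a pte_t pointer are illustrative and not part of this file.

/* Illustrative arch glue (sketch): route PTE page allocation through fragments. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	/* kernel == 1: skip the user page-table ctor/dtor accounting */
	return pte_fragment_alloc(mm, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	/* user page table; pgtable_t assumed to be pte_t * here */
	return (pgtable_t)pte_fragment_alloc(mm, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}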