cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

page.h (4179B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page management definitions for the Hexagon architecture
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <linux/const.h>

/*  This is probably not the most graceful way to handle this.  */

#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT 12
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_4KB
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT 14
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_16KB
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT 16
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_64KB
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PAGE_SHIFT 18
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_256KB
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PAGE_SHIFT 20
#define HEXAGON_L1_PTE_SIZE __HVM_PDE_S_1MB
#endif

/*
 *  These should be defined in hugetlb.h, but apparently not.
 *  "Huge" for us should be 4MB or 16MB, which are both represented
 *  in L1 PTE's.  Right now, it's set up for 4MB.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT 22
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
#define HVM_HUGEPAGE_SIZE 0x5
#endif

#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~((1 << PAGE_SHIFT) - 1))
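/*
 * Worked example (illustrative, assuming CONFIG_PAGE_SIZE_4KB):
 *   PAGE_SHIFT = 12
 *   PAGE_SIZE  = 1UL << 12        = 0x1000 (4096 bytes)
 *   PAGE_MASK  = ~((1 << 12) - 1) = 0xfffff000
 * so (addr & PAGE_MASK) rounds an address down to its page boundary and
 * (addr & ~PAGE_MASK) is the offset within that page.
 */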

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

/*
 * This is for PFN_DOWN, which mm.h needs.  Seems the right place to pull it in.
 */
#include <linux/pfn.h>

/*
 * We implement a two-level architecture-specific page table structure.
 * Null intermediate page table level (pmd, pud) definitions will come from
 * asm-generic/pagetable-nopmd.h and asm-generic/pagetable-nopud.h
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)     ((x).pte)
#define pgd_val(x)     ((x).pgd)
#define pgprot_val(x)  ((x).pgprot)
#define __pte(x)       ((pte_t) { (x) })
#define __pgd(x)       ((pgd_t) { (x) })
#define __pgprot(x)    ((pgprot_t) { (x) })
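/*
 * The single-member structs above exist for type safety: a raw pte value
 * cannot be passed where a pgd is expected.  A minimal usage sketch
 * (illustrative value):
 *
 *   pte_t pte = __pte(0x1000);          // wrap a raw table entry
 *   unsigned long raw = pte_val(pte);   // unwrap it again
 */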

/*
 * We need a __pa and a __va routine for kernel space.
 * MIPS says they're only used during mem_init.
 * also, check if we need a PHYS_OFFSET.
 */
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(x) ((void *)((unsigned long)(x) - PHYS_OFFSET + PAGE_OFFSET))
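/*
 * The kernel mapping is a simple linear offset.  For illustration only,
 * assuming PAGE_OFFSET = 0xc0000000 and PHYS_OFFSET = 0 (the real values
 * come from <asm/mem-layout.h> and the platform configuration):
 *   __pa(0xc0004000) = 0xc0004000 - 0xc0000000 + 0 = 0x00004000
 *   __va(0x00004000) = 0x00004000 - 0 + 0xc0000000 = 0xc0004000
 */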

/* The "page frame" descriptor is defined in linux/mm.h */
struct page;

/* Returns page frame descriptor for virtual address. */
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(__pa(kaddr)))

/* Default vm area behavior is non-executable.  */
#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

/*  Need to not use a define for linesize; may move this to another file.  */
static inline void clear_page(void *page)
{
	/*  This can only be done on pages with L1 WB cache */
	asm volatile(
		"	loop0(1f,%1);\n"
		"1:	{ dczeroa(%0);\n"
		"	  %0 = add(%0,#32); }:endloop0\n"
		: "+r" (page)
		: "r" (PAGE_SIZE/32)
		: "lc0", "sa0", "memory"
	);
}
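/*
 * The hardware loop above runs PAGE_SIZE/32 iterations; each iteration
 * zero-allocates one 32-byte L1 cache line with dczeroa and advances the
 * pointer, clearing the page without first fetching it from memory.
 * Roughly equivalent C (illustrative sketch only):
 *
 *   for (unsigned long i = 0; i < PAGE_SIZE; i += 32)
 *           memset((char *)page + i, 0, 32);
 */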

#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)

/*
 * Under assumption that kernel always "sees" user map...
 */
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

/*
 * page_to_phys - convert page to physical address
 * @page - pointer to page entry in mem_map
 */
#define page_to_phys(page)      (page_to_pfn(page) << PAGE_SHIFT)

#define virt_to_pfn(kaddr)      (__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)

#define page_to_virt(page)	__va(page_to_phys(page))
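/*
 * Note that virt_to_pfn() shifts out the in-page offset, so the round trip
 * pfn_to_virt(virt_to_pfn(kaddr)) yields the page-aligned base of the page
 * containing kaddr rather than kaddr itself.
 */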

/*
 * For port to Hexagon Virtual Machine, MAYBE we check for attempts
 * to reference reserved HVM space, but in any case, the VM will be
 * protected.
 */
#define kern_addr_valid(addr)   (1)

#include <asm/mem-layout.h>
#include <asm-generic/memory_model.h>
/* XXX Todo: implement assembly-optimized version of getorder. */
#include <asm-generic/getorder.h>

#endif /* ifndef __ASSEMBLY__ */
#endif /* ifdef __KERNEL__ */

#endif