cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgtable-bits.h (7852B)


/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2002 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2002  Maciej W. Rozycki
 */
#ifndef _ASM_PGTABLE_BITS_H
#define _ASM_PGTABLE_BITS_H


/*
 * Note that we shift the lower 32 bits of each EntryLo[01] entry
 * 6 bits to the left. That way we can convert the PFN into the
 * physical address by a single 'and' operation and gain 6 additional
 * bits for storing information which isn't present in a normal
 * MIPS page table.
 *
 * Similar to the Alpha port, we need to keep track of the ref
 * and mod bits in software.  We have a software "yeah you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page.  By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page; the mod bit is thus kept via the
 * hardware dirty bit.
 *
 * Certain revisions of the R4000 and R5000 have a bug where if a
 * certain sequence occurs in the last 3 instructions of an executable
 * page, and the following page is not mapped, the CPU can do
 * unpredictable things.  The code (when it is written) to deal with
 * this problem will be in the update_mmu_cache() code for the r4k.
 */
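/*
 * As an illustration of the point above, assuming 4 KB pages: the PFN
 * field of EntryLo starts at bit 6, so after the extra 6-bit shift it
 * lines up with bit 12 (the page offset width), and a single 'and'
 * that clears the low status bits yields the physical address.
 */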
#if defined(CONFIG_XPA)

/*
 * Page table bit offsets used for 64 bit physical addressing on
 * MIPS32r5 with XPA.
 */
enum pgtable_bits {
	/* Used by TLB hardware (placed in EntryLo*) */
	_PAGE_NO_EXEC_SHIFT,
	_PAGE_NO_READ_SHIFT,
	_PAGE_GLOBAL_SHIFT,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_SHIFT,

	/* Used only by software (masked out before writing EntryLo*) */
	_PAGE_PRESENT_SHIFT = 24,
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
	_PAGE_SPECIAL_SHIFT,
#endif
#if defined(CONFIG_HAVE_ARCH_SOFT_DIRTY)
	_PAGE_SOFT_DIRTY_SHIFT,
#endif
};
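/*
 * Assuming none of the optional CONFIG_* bits above, this resolves to
 * NO_EXEC=0, NO_READ=1, GLOBAL=2, VALID=3, DIRTY=4, a 3-bit cache
 * attribute field at bits 5-7, and the software bits at 24-27, e.g.
 * _PAGE_PRESENT == (1 << 24) and _PAGE_MODIFIED == (1 << 27).
 */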

/*
 * Bits for extended EntryLo0/EntryLo1 registers
 */
#define _PFNX_MASK		0xffffff

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/*
 * Page table bit offsets used for 36 bit physical addressing on MIPS32,
 * for example with Alchemy or Netlogic XLP/XLR.
 */
enum pgtable_bits {
	/* Used by TLB hardware (placed in EntryLo*) */
	_PAGE_GLOBAL_SHIFT,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_SHIFT,

	/* Used only by software (masked out before writing EntryLo*) */
	_PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3,
	_PAGE_NO_READ_SHIFT,
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
	_PAGE_SPECIAL_SHIFT,
#endif
#if defined(CONFIG_HAVE_ARCH_SOFT_DIRTY)
	_PAGE_SOFT_DIRTY_SHIFT,
#endif
};

#elif defined(CONFIG_CPU_R3K_TLB)

/* Page table bits used for r3k systems */
enum pgtable_bits {
	/* Used only by software (writes to EntryLo ignored) */
	_PAGE_PRESENT_SHIFT,
	_PAGE_NO_READ_SHIFT,
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
	_PAGE_SPECIAL_SHIFT,
#endif
#if defined(CONFIG_HAVE_ARCH_SOFT_DIRTY)
	_PAGE_SOFT_DIRTY_SHIFT,
#endif

	/* Used by TLB hardware (placed in EntryLo) */
	_PAGE_GLOBAL_SHIFT = 8,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_UNCACHED_SHIFT,
};

#else

/* Page table bits used for r4k systems */
enum pgtable_bits {
	/* Used only by software (masked out before writing EntryLo*) */
	_PAGE_PRESENT_SHIFT,
#if !defined(CONFIG_CPU_HAS_RIXI)
	_PAGE_NO_READ_SHIFT,
#endif
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
	_PAGE_HUGE_SHIFT,
#endif
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
	_PAGE_SPECIAL_SHIFT,
#endif
#if defined(CONFIG_HAVE_ARCH_SOFT_DIRTY)
	_PAGE_SOFT_DIRTY_SHIFT,
#endif
	/* Used by TLB hardware (placed in EntryLo*) */
#if defined(CONFIG_CPU_HAS_RIXI)
	_PAGE_NO_EXEC_SHIFT,
	_PAGE_NO_READ_SHIFT,
#endif
	_PAGE_GLOBAL_SHIFT,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_SHIFT,
};
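/*
 * As a concrete example, assuming CONFIG_CPU_HAS_RIXI and none of the
 * optional bits above, this resolves to PRESENT=0, WRITE=1, ACCESSED=2,
 * MODIFIED=3, NO_EXEC=4, NO_READ=5, GLOBAL=6, VALID=7, DIRTY=8, with
 * the 3-bit cache attribute field at bits 9-11 and the PFN above it.
 */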

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

/* Used only by software */
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
# define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
#endif
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
# define _PAGE_SPECIAL		(1 << _PAGE_SPECIAL_SHIFT)
#else
# define _PAGE_SPECIAL		0
#endif
#if defined(CONFIG_HAVE_ARCH_SOFT_DIRTY)
# define _PAGE_SOFT_DIRTY	(1 << _PAGE_SOFT_DIRTY_SHIFT)
#else
# define _PAGE_SOFT_DIRTY	0
#endif

/* Used by TLB hardware (placed in EntryLo*) */
#if defined(CONFIG_XPA)
# define _PAGE_NO_EXEC		(1 << _PAGE_NO_EXEC_SHIFT)
#elif defined(CONFIG_CPU_HAS_RIXI)
# define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
#endif
#define _PAGE_NO_READ		(1 << _PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#if defined(CONFIG_CPU_R3K_TLB)
# define _CACHE_UNCACHED	(1 << _CACHE_UNCACHED_SHIFT)
# define _CACHE_MASK		_CACHE_UNCACHED
# define _PFN_SHIFT		PAGE_SHIFT
#else
# define _CACHE_MASK		(7 << _CACHE_SHIFT)
# define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
#endif

#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC		0
#endif

#define _PAGE_SILENT_READ	_PAGE_VALID
#define _PAGE_SILENT_WRITE	_PAGE_DIRTY

#define _PFN_MASK		(~((1 << (_PFN_SHIFT)) - 1))
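/*
 * Sketch, assuming 4 KB pages (PAGE_SHIFT == 12) and the r4k layout:
 * _PFN_SHIFT == _CACHE_SHIFT + 3, i.e. the first bit above the 3-bit
 * cache attribute field, so _PFN_MASK clears every status bit below
 * the PFN and the frame number is simply pte_val >> _PFN_SHIFT.
 */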

/*
 * The final layouts of the PTE bits are:
 *
 *   64-bit, R1 or earlier:     CCC D V G [S H] M A W R P
 *   32-bit, R1 or earlier:     CCC D V G M A W R P
 *   64-bit, R2 or later:       CCC D V G RI/R XI [S H] M A W P
 *   32-bit, R2 or later:       CCC D V G RI/R XI M A W P
 */


/*
 * pte_to_entrylo converts a page table entry (PTE) into a MIPS
 * EntryLo0/1 value.
 */
static inline uint64_t pte_to_entrylo(unsigned long pte_val)
{
#ifdef CONFIG_CPU_HAS_RIXI
	if (cpu_has_rixi) {
		int sa;
#ifdef CONFIG_32BIT
		sa = 31 - _PAGE_NO_READ_SHIFT;
#else
		sa = 63 - _PAGE_NO_READ_SHIFT;
#endif
		/*
		 * C has no way to express that this is a DSRL
		 * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2.  Luckily
		 * in the fast path this is done in assembly.
		 */
		return (pte_val >> _PAGE_GLOBAL_SHIFT) |
			((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
	}
#endif

	return pte_val >> _PAGE_GLOBAL_SHIFT;
}
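/*
 * Worked example, assuming a 64-bit kernel with RIXI and none of the
 * optional software bits: _PAGE_NO_READ_SHIFT == 5, so sa == 58 and
 * the left shift moves _PAGE_NO_READ (bit 5) to EntryLo bit 63 (RI)
 * and _PAGE_NO_EXEC (bit 4) to bit 62 (XI), while the right shift by
 * _PAGE_GLOBAL_SHIFT (6) lines G/V/D/C and the PFN up with EntryLo
 * bits 0 and above.
 */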

/*
 * Cache attributes
 */
#if defined(CONFIG_CPU_R3K_TLB)

#define _CACHE_CACHABLE_NONCOHERENT 0
#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED

#elif defined(CONFIG_CPU_SB1)

/* No penalty for being coherent on the SB1, so just
   use it for "noncoherent" spaces, too.  Shouldn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)

#endif

#ifndef _CACHE_CACHABLE_NO_WA
#define _CACHE_CACHABLE_NO_WA		(0<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_WA
#define _CACHE_CACHABLE_WA		(1<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED
#define _CACHE_UNCACHED			(2<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_NONCOHERENT
#define _CACHE_CACHABLE_NONCOHERENT	(3<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CE
#define _CACHE_CACHABLE_CE		(4<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_COW
#define _CACHE_CACHABLE_COW		(5<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CUW
#define _CACHE_CACHABLE_CUW		(6<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED_ACCELERATED
#define _CACHE_UNCACHED_ACCELERATED	(7<<_CACHE_SHIFT)
#endif

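/*
 * __READABLE pairs the hardware valid bit with the software accessed
 * bit, and __WRITEABLE pairs the hardware dirty bit with the software
 * write and modified bits; _PAGE_CHG_MASK covers the bits (PFN, cache
 * attributes, accessed/modified/soft-dirty) that are meant to survive
 * a change of the PTE's protection bits.
 */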
#define __READABLE	(_PAGE_SILENT_READ | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK	(_PAGE_ACCESSED | _PAGE_MODIFIED |	\
			 _PAGE_SOFT_DIRTY | _PFN_MASK | _CACHE_MASK)

#endif /* _ASM_PGTABLE_BITS_H */