cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pgtsrmmu.h (4879B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pgtsrmmu.h:  SRMMU page table defines and code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_PGTSRMMU_H
#define _SPARC_PGTSRMMU_H

#include <asm/page.h>

#ifdef __ASSEMBLY__
#include <asm/thread_info.h>	/* TI_UWINMASK for WINDOW_FLUSH */
#endif

/* Number of contexts is implementation-dependent; 64k is the most we support */
#define SRMMU_MAX_CONTEXTS	65536

#define SRMMU_PTE_TABLE_SIZE		(PTRS_PER_PTE*4)
#define SRMMU_PMD_TABLE_SIZE		(PTRS_PER_PMD*4)
#define SRMMU_PGD_TABLE_SIZE		(PTRS_PER_PGD*4)

/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK         0x3
#define SRMMU_ET_INVALID      0x0
#define SRMMU_ET_PTD          0x1
#define SRMMU_ET_PTE          0x2
#define SRMMU_ET_REPTE        0x3 /* AIEEE, SuperSparc II reverse endian page! */

/* Physical page extraction from PTP's and PTE's. */
#define SRMMU_CTX_PMASK    0xfffffff0
#define SRMMU_PTD_PMASK    0xfffffff0
#define SRMMU_PTE_PMASK    0xffffff00
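
/* Illustrative sketch, not part of the original header: on the SRMMU the
 * PTD/PTE address fields hold a physical address shifted right by 4, so a
 * table or page address is recovered by masking and shifting.  The helper
 * below is hypothetical and only shows the arithmetic; the kernel's real
 * accessors (pte_pfn() and friends) live elsewhere in the sparc32 headers.
 */
#ifndef __ASSEMBLY__
static inline unsigned long srmmu_pte_to_pfn(unsigned long pte)
{
	/* PTE<31:8> holds the physical page number; the physical page
	 * base is pfn << PAGE_SHIFT (physical addresses may be 36-bit). */
	return (pte & SRMMU_PTE_PMASK) >> 8;
}
#endif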

/* The pte non-page bits.  Some notes:
 * 1) cache, dirty, valid, and ref are frobbable
 *    for both supervisor and user pages.
 * 2) exec and write will only give the desired effect
 *    on user pages
 * 3) use priv and priv_readonly for changing the
 *    characteristics of supervisor ptes
 */
#define SRMMU_CACHE        0x80
#define SRMMU_DIRTY        0x40
#define SRMMU_REF          0x20
#define SRMMU_NOREAD       0x10
#define SRMMU_EXEC         0x08
#define SRMMU_WRITE        0x04
#define SRMMU_VALID        0x02 /* SRMMU_ET_PTE */
#define SRMMU_PRIV         0x1c
#define SRMMU_PRIV_RDONLY  0x18

#define SRMMU_CHG_MASK    (0xffffff00 | SRMMU_REF | SRMMU_DIRTY)
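
/* Illustrative sketch, not part of the original header: SRMMU_CHG_MASK keeps
 * the page frame plus the REF/DIRTY state while the protection bits are
 * replaced.  The helper name is hypothetical; the kernel does the equivalent
 * in its sparc32 pte_modify() implementation.
 */
#ifndef __ASSEMBLY__
static inline unsigned long srmmu_pte_modify(unsigned long pte, unsigned long newprot)
{
	/* Drop the old protection bits, keep frame + REF + DIRTY,
	 * then install the new protection bits. */
	return (pte & SRMMU_CHG_MASK) | newprot;
}
#endif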

/* SRMMU swap entry encoding
 *
 * We use 5 bits for the type and 20 for the offset.  This gives us
 * 32 swapfiles of 4GB each.  Encoding looks like:
 *
 * oooooooooooooooooooootttttRRRRRRR
 * fedcba9876543210fedcba9876543210
 *
 * The bottom 7 bits are reserved for protection and status bits, especially
 * PRESENT.
 */
#define SRMMU_SWP_TYPE_MASK	0x1f
#define SRMMU_SWP_TYPE_SHIFT	7
#define SRMMU_SWP_OFF_MASK	0xfffff
#define SRMMU_SWP_OFF_SHIFT	(SRMMU_SWP_TYPE_SHIFT + 5)
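
/* Illustrative sketch, not part of the original header: decoding a swap
 * entry laid out as described above.  The helper names are hypothetical;
 * the kernel's own __swp_type()/__swp_offset() accessors are defined
 * elsewhere in terms of these constants.
 */
#ifndef __ASSEMBLY__
static inline unsigned long srmmu_swp_type(unsigned long entry)
{
	return (entry >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long srmmu_swp_offset(unsigned long entry)
{
	return (entry >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}
#endif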

/* Some day I will implement true fine grained access bits for
 * user pages because the SRMMU gives us the capabilities to
 * enforce all the protection levels that vma's can have.
 * XXX But for now...
 */
#define SRMMU_PAGE_NONE    __pgprot(SRMMU_CACHE | \
				    SRMMU_PRIV | SRMMU_REF)
#define SRMMU_PAGE_SHARED  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_WRITE | SRMMU_REF)
#define SRMMU_PAGE_COPY    __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_RDONLY  __pgprot(SRMMU_VALID | SRMMU_CACHE | \
				    SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_KERNEL  __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
				    SRMMU_DIRTY | SRMMU_REF)

/* SRMMU Register addresses in ASI 0x4.  These are valid for all
 * current SRMMU implementations that exist.
 */
#define SRMMU_CTRL_REG           0x00000000
#define SRMMU_CTXTBL_PTR         0x00000100
#define SRMMU_CTX_REG            0x00000200
#define SRMMU_FAULT_STATUS       0x00000300
#define SRMMU_FAULT_ADDR         0x00000400
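
/* Illustrative sketch, not part of the original header: these registers are
 * accessed with lda/sta through ASI 0x4 (ASI_M_MMUREGS from asm/asi.h, which
 * the caller is assumed to have included, as the inlines further down already
 * assume for ASI_M_FLUSH_PROBE).  The helper below is hypothetical; the real
 * accessors are the srmmu_get_*()/srmmu_set_*() functions declared below.
 */
#ifndef __ASSEMBLY__
static inline unsigned int srmmu_read_mmu_reg(unsigned long offset)
{
	unsigned int val;

	/* Load from the given register offset in the MMU register ASI. */
	__asm__ __volatile__("lda [%1] %2, %0" :
			     "=r" (val) :
			     "r" (offset), "i" (ASI_M_MMUREGS));
	return val;
}
#endif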

#define WINDOW_FLUSH(tmp1, tmp2)					\
	mov	0, tmp1;						\
98:	ld	[%g6 + TI_UWINMASK], tmp2;				\
	orcc	%g0, tmp2, %g0;						\
	add	tmp1, 1, tmp1;						\
	bne	98b;							\
	 save	%sp, -64, %sp;						\
99:	subcc	tmp1, 1, tmp1;						\
	bne	99b;							\
	 restore %g0, %g0, %g0;

#ifndef __ASSEMBLY__
extern unsigned long last_valid_pfn;

/* This makes sense. Honest it does - Anton */
/* XXX Yes but it's ugly as sin.  FIXME. -KMW */
extern void *srmmu_nocache_pool;
#define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
#define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
#define __nocache_fix(VADDR) ((__typeof__(VADDR))__va(__nocache_pa(VADDR)))
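
/* Illustrative example, not part of the original header: the nocache pool is
 * one physically contiguous region remapped at SRMMU_NOCACHE_VADDR, so the
 * first two macros above are inverses of each other:
 *
 *	paddr = __nocache_pa(vaddr);	// nocache virtual -> physical
 *	vaddr = __nocache_va(paddr);	// physical -> nocache virtual
 *
 * while __nocache_fix() yields the kernel linear-map (__va) alias of the
 * same memory.
 */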

/* Accessing the MMU control register. */
unsigned int srmmu_get_mmureg(void);
void srmmu_set_mmureg(unsigned long regval);
void srmmu_set_ctable_ptr(unsigned long paddr);
void srmmu_set_context(int context);
int srmmu_get_context(void);
unsigned int srmmu_get_fstatus(void);
unsigned int srmmu_get_faddr(void);

/* This is guaranteed on all SRMMU's. */
static inline void srmmu_flush_whole_tlb(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
			     "r" (0x400),        /* Flush entire TLB!! */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");

}

static inline int
srmmu_get_pte (unsigned long addr)
{
	register unsigned long entry;

	__asm__ __volatile__("\n\tlda [%1] %2,%0\n\t" :
				"=r" (entry):
				"r" ((addr & 0xfffff000) | 0x400), "i" (ASI_M_FLUSH_PROBE));
	return entry;
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC_PGTSRMMU_H) */