cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

tlb.h (4321B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_TLB_H
#define __ASM_TLB_H

#include <linux/mm_types.h>
#include <asm/cpu-features.h>
#include <asm/loongarch.h>

/*
 * TLB invalidate/flush operations.
 */
static inline void tlbclr(void)
{
	__asm__ __volatile__("tlbclr");
}

static inline void tlbflush(void)
{
	__asm__ __volatile__("tlbflush");
}

/*
 * TLB R/W operations.
 */

/* TLBSRCH: search for the entry matching CSR.TLBEHI, recording the result in CSR.TLBIDX. */
static inline void tlb_probe(void)
{
	__asm__ __volatile__("tlbsrch");
}

/* TLBRD: read the entry indexed by CSR.TLBIDX into the TLBEHI/TLBELO CSRs. */
static inline void tlb_read(void)
{
	__asm__ __volatile__("tlbrd");
}

/* TLBWR: write the TLBEHI/TLBELO CSRs into the entry indexed by CSR.TLBIDX. */
static inline void tlb_write_indexed(void)
{
	__asm__ __volatile__("tlbwr");
}

/* TLBFILL: write the TLBEHI/TLBELO CSRs into a hardware-chosen entry. */
static inline void tlb_write_random(void)
{
	__asm__ __volatile__("tlbfill");
}
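
/*
 * A minimal sketch (not part of the original header) of how the R/W
 * helpers above combine: probe for a virtual address and, on a hit, pull
 * the matched entry into the TLB CSRs. It assumes the csr_write64()/
 * csr_read32() accessors and the LOONGARCH_CSR_TLBEHI/LOONGARCH_CSR_TLBIDX
 * numbers from <asm/loongarch.h>, PAGE_MASK from <asm/page.h>, and that a
 * failed search sets the NE bit (bit 31) of CSR.TLBIDX.
 */
static inline bool example_tlb_lookup(unsigned long vaddr)
{
	csr_write64(vaddr & PAGE_MASK, LOONGARCH_CSR_TLBEHI); /* set search key */
	tlb_probe();                                          /* TLBSRCH fills CSR.TLBIDX */
	if ((int)csr_read32(LOONGARCH_CSR_TLBIDX) < 0)        /* NE bit set: no match */
		return false;
	tlb_read();                                           /* TLBRD loads TLBEHI/TLBELO */
	return true;
}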

/*
 * Operation codes for the INVTLB instruction, consumed by invtlb() and
 * its wrappers below (see the usage sketch after those helpers).
 */
enum invtlb_ops {
	/* Invalidate all TLB entries */
	INVTLB_ALL = 0x0,
	/* Invalidate all entries in current TLB */
	INVTLB_CURRENT_ALL = 0x1,
	/* Invalidate all global=1 lines in current TLB */
	INVTLB_CURRENT_GTRUE = 0x2,
	/* Invalidate all global=0 lines in current TLB */
	INVTLB_CURRENT_GFALSE = 0x3,
	/* Invalidate global=0 lines with matched asid in current TLB */
	INVTLB_GFALSE_AND_ASID = 0x4,
	/* Invalidate addr with global=0 and matched asid in current TLB */
	INVTLB_ADDR_GFALSE_AND_ASID = 0x5,
	/* Invalidate addr with global=1 or matched asid in current TLB */
	INVTLB_ADDR_GTRUE_OR_ASID = 0x6,
	/* Invalidate lines with matched gid in guest TLB */
	INVGTLB_GID = 0x9,
	/* Invalidate global=1 lines with matched gid in guest TLB */
	INVGTLB_GID_GTRUE = 0xa,
	/* Invalidate global=0 lines with matched gid in guest TLB */
	INVGTLB_GID_GFALSE = 0xb,
	/* Invalidate global=0 lines with matched gid and asid in guest TLB */
	INVGTLB_GID_GFALSE_ASID = 0xc,
	/* Invalidate global=0 lines with matched gid, asid and addr in guest TLB */
	INVGTLB_GID_GFALSE_ASID_ADDR = 0xd,
	/* Invalidate global=1 lines with matched gid, asid and addr in guest TLB */
	INVGTLB_GID_GTRUE_ASID_ADDR = 0xe,
	/* Invalidate all gva->gpa lines in guest TLB, any gid */
	INVGTLB_ALLGID_GVA_TO_GPA = 0x10,
	/* Invalidate all gpa->hpa lines, any gid */
	INVTLB_ALLGID_GPA_TO_HPA = 0x11,
	/* Invalidate all lines of any gid, both gva->gpa and gpa->hpa */
	INVTLB_ALLGID = 0x12,
	/* Invalidate gva->gpa guest TLB lines with matched gid */
	INVGTLB_GID_GVA_TO_GPA = 0x13,
	/* Invalidate gpa->hpa lines with matched gid */
	INVTLB_GID_GPA_TO_HPA = 0x14,
	/* Invalidate all lines with matched gid, both gva->gpa and gpa->hpa */
	INVTLB_GID_ALL = 0x15,
	/* Invalidate gpa->hpa lines with matched gid and addr */
	INVTLB_GID_ADDR = 0x16,
};

/*
 * INVTLB instruction encoding: op, info and addr are packed as
 * (0x1 << 26) | (0x24 << 20) | (0x13 << 15) | (addr << 10) | (info << 5) | op
 * where addr and info are 5-bit register numbers (resolved via parse_r)
 * and op is a 5-bit immediate.
 */
static inline void invtlb(u32 op, u32 info, u64 addr)
{
	__asm__ __volatile__(
		"parse_r addr,%0\n\t"
		"parse_r info,%1\n\t"
		".word ((0x6498000) | (addr << 10) | (info << 5) | %2)\n\t"
		:
		: "r"(addr), "r"(info), "i"(op)
		:
		);
}
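
/*
 * Worked check of the fixed opcode bits documented above (illustrative,
 * not part of the original header; assumes static_assert() from
 * <linux/build_bug.h> is available here):
 * 0x1 << 26 = 0x4000000, 0x24 << 20 = 0x2400000, 0x13 << 15 = 0x98000,
 * and 0x4000000 | 0x2400000 | 0x98000 = 0x6498000.
 */
static_assert(((0x1 << 26) | (0x24 << 20) | (0x13 << 15)) == 0x6498000,
	      "invtlb opcode base must match its documented decomposition");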

/* As invtlb(), but with the info field fixed to zero (the info
 * parameter is unused, kept only for a uniform signature). */
static inline void invtlb_addr(u32 op, u32 info, u64 addr)
{
	__asm__ __volatile__(
		"parse_r addr,%0\n\t"
		".word ((0x6498000) | (addr << 10) | (0 << 5) | %1)\n\t"
		:
		: "r"(addr), "i"(op)
		:
		);
}

/* As invtlb(), but with the addr field fixed to zero (addr is unused). */
static inline void invtlb_info(u32 op, u32 info, u64 addr)
{
	__asm__ __volatile__(
		"parse_r info,%0\n\t"
		".word ((0x6498000) | (0 << 10) | (info << 5) | %1)\n\t"
		:
		: "r"(info), "i"(op)
		:
		);
}

/* As invtlb(), but with both addr and info fixed to zero. */
static inline void invtlb_all(u32 op, u32 info, u64 addr)
{
	__asm__ __volatile__(
		".word ((0x6498000) | (0 << 10) | (0 << 5) | %0)\n\t"
		:
		: "i"(op)
		:
		);
}
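
/*
 * A minimal usage sketch tying the invtlb_ops codes to the wrappers
 * above (hypothetical helpers, not part of the original header): drop
 * every entry of the current TLB, or just one non-global page of a
 * given address space.
 */
static inline void example_flush_all_current(void)
{
	/* op 0x1: invalidate all entries in the current TLB. */
	invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
}

static inline void example_flush_page(u32 asid, u64 vaddr)
{
	/* op 0x5: invalidate the global=0 entry matching both asid and vaddr. */
	invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, vaddr);
}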

/*
 * LoongArch doesn't need any special per-pte or per-vma handling, except
 * that we need to flush the cache for the area being unmapped.
 */
#define tlb_start_vma(tlb, vma)					\
	do {							\
		if (!(tlb)->fullmm)				\
			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
	} while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)

/*
 * Declare tlb_flush and define a macro of the same name before including
 * <asm-generic/tlb.h>, so the generic mmu_gather code picks up this
 * architecture's implementation instead of its fallback.
 */
static void tlb_flush(struct mmu_gather *tlb);

#define tlb_flush tlb_flush
#include <asm-generic/tlb.h>

static inline void tlb_flush(struct mmu_gather *tlb)
{
	struct vm_area_struct vma;

	/* Build a minimal on-stack vma so flush_tlb_range() can be reused. */
	vma.vm_mm = tlb->mm;
	vma.vm_flags = 0;
	if (tlb->fullmm) {
		flush_tlb_mm(tlb->mm);
		return;
	}

	flush_tlb_range(&vma, tlb->start, tlb->end);
}

extern void handle_tlb_load(void);
extern void handle_tlb_store(void);
extern void handle_tlb_modify(void);
extern void handle_tlb_refill(void);
extern void handle_tlb_protect(void);

extern void dump_tlb_all(void);
extern void dump_tlb_regs(void);

#endif /* __ASM_TLB_H */