processor.h (4932B)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/sizes.h>

#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/loongarch.h>
#include <asm/vdso/processor.h>
#include <uapi/asm/ptrace.h>
#include <uapi/asm/sigcontext.h>

#ifdef CONFIG_32BIT

#define TASK_SIZE	0x80000000UL
#define TASK_SIZE_MIN	TASK_SIZE
#define STACK_TOP_MAX	TASK_SIZE

#define TASK_IS_32BIT_ADDR 1

#endif

#ifdef CONFIG_64BIT

#define TASK_SIZE32	0x100000000UL
#define TASK_SIZE64	(0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))

#define TASK_SIZE	(test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define TASK_SIZE_MIN	TASK_SIZE32
#define STACK_TOP_MAX	TASK_SIZE64

#define TASK_SIZE_OF(tsk) \
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)

#endif

#define VDSO_RANDOMIZE_SIZE	(TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)

unsigned long stack_top(void);
#define STACK_TOP stack_top()

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)

#define FPU_REG_WIDTH	256
#define FPU_ALIGN	__attribute__((aligned(32)))

union fpureg {
	__u32	val32[FPU_REG_WIDTH / 32];
	__u64	val64[FPU_REG_WIDTH / 64];
};

#define FPR_IDX(width, idx)	(idx)

#define BUILD_FPR_ACCESS(width) \
static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
{ \
	return fpr->val##width[FPR_IDX(width, idx)]; \
} \
 \
static inline void set_fpr##width(union fpureg *fpr, unsigned int idx, \
				  u##width val) \
{ \
	fpr->val##width[FPR_IDX(width, idx)] = val; \
}

BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)

struct loongarch_fpu {
	unsigned int	fcsr;
	uint64_t	fcc;	/* 8x8 */
	union fpureg	fpr[NUM_FPU_REGS];
};

#define INIT_CPUMASK { \
	{0,} \
}

#define ARCH_MIN_TASKALIGN	32

struct loongarch_vdso_info;

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Main processor registers. */
	unsigned long reg01, reg03, reg22;			/* ra sp fp */
	unsigned long reg23, reg24, reg25, reg26;		/* s0-s3 */
	unsigned long reg27, reg28, reg29, reg30, reg31;	/* s4-s8 */

	/* CSR registers */
	unsigned long csr_prmd;
	unsigned long csr_crmd;
	unsigned long csr_euen;
	unsigned long csr_ecfg;
	unsigned long csr_badvaddr;	/* Last user fault */

	/* Scratch registers */
	unsigned long scr0;
	unsigned long scr1;
	unsigned long scr2;
	unsigned long scr3;

	/* Eflags register */
	unsigned long eflags;

	/* Other stuff associated with the thread. */
	unsigned long trap_nr;
	unsigned long error_code;
	struct loongarch_vdso_info *vdso;

	/*
	 * FPU & vector registers, must be last because
	 * they are conditionally copied at fork().
	 */
	struct loongarch_fpu fpu FPU_ALIGN;
};

#define INIT_THREAD { \
	/* \
	 * Main processor registers \
	 */ \
	.reg01 = 0, \
	.reg03 = 0, \
	.reg22 = 0, \
	.reg23 = 0, \
	.reg24 = 0, \
	.reg25 = 0, \
	.reg26 = 0, \
	.reg27 = 0, \
	.reg28 = 0, \
	.reg29 = 0, \
	.reg30 = 0, \
	.reg31 = 0, \
	.csr_crmd = 0, \
	.csr_prmd = 0, \
	.csr_euen = 0, \
	.csr_ecfg = 0, \
	.csr_badvaddr = 0, \
	/* \
	 * Other stuff associated with the process \
	 */ \
	.trap_nr = 0, \
	.error_code = 0, \
	/* \
	 * FPU & vector registers \
	 */ \
	.fpu = { \
		.fcsr = 0, \
		.fcc = 0, \
		.fpr = {{{0,},},}, \
	}, \
}

struct task_struct;

/* Free all resources held by a thread. */
#define release_thread(thread) do { } while (0)

enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL};

extern unsigned long boot_option_idle_override;
/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);

static inline void flush_thread(void)
{
}

unsigned long __get_wchan(struct task_struct *p);

#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
			 THREAD_SIZE - 32 - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen)
#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg)

#define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);})

#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)

#endif

#endif /* _ASM_PROCESSOR_H */
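
For reference, the BUILD_FPR_ACCESS(64) invocation above generates the following pair of inline accessors (shown here fully expanded as an informal sketch, not part of the header itself; FPR_IDX() currently reduces to the bare index, and the 32-bit variant is analogous):

/* Result of expanding BUILD_FPR_ACCESS(64) with FPR_IDX(64, idx) == (idx). */
static inline u64 get_fpr64(union fpureg *fpr, unsigned idx)
{
	/* Read the 64-bit view of FP/vector register slot idx. */
	return fpr->val64[idx];
}

static inline void set_fpr64(union fpureg *fpr, unsigned int idx, u64 val)
{
	/* Write the 64-bit view of FP/vector register slot idx. */
	fpr->val64[idx] = val;
}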