cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

stacktrace.h (2847B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>
#include <linux/llist.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	__NR_STACK_TYPES
};

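/*
 * Describes the stack that a given address range was found on: its
 * bounds and which of the stack types above it is. Filled in by the
 * on_*_stack() helpers below when a non-NULL info pointer is passed.
 */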
struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
			   const char *loglvl);

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

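/*
 * Core range check shared by the helpers below: returns true only if the
 * object [sp, sp + size) lies entirely within [low, high), where a zero
 * low means the stack has not been set up. The "sp + size < sp" test
 * rejects ranges that would wrap around the address space. On success,
 * *info (if provided) is filled in with the stack's bounds and type.
 */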
static inline bool on_stack(unsigned long sp, unsigned long size,
			    unsigned long low, unsigned long high,
			    enum stack_type type, struct stack_info *info)
{
	if (!low)
		return false;

	if (sp < low || sp + size < sp || sp + size > high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = type;
	}
	return true;
}

static inline bool on_irq_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}

static inline bool on_task_stack(const struct task_struct *tsk,
				 unsigned long sp, unsigned long size,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
			struct stack_info *info) { return false; }
#endif


/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	if (on_task_stack(tsk, sp, size, info))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, size, info))
		return true;
	if (on_overflow_stack(sp, size, info))
		return true;
	if (on_sdei_stack(sp, size, info))
		return true;

	return false;
}

#endif	/* __ASM_STACKTRACE_H */
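
For context, this appears to be the arm64 <asm/stacktrace.h>: the helpers above supply the checks the kernel's frame-pointer unwinder relies on before dereferencing a candidate frame record. The fragment below is an illustrative sketch only, not part of the file: walk_frames and max_depth are made-up names, and the real unwinder in arch/arm64/kernel/stacktrace.c additionally tracks which stack types it has already visited so it cannot loop between stacks.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/stacktrace.h>

/* Hypothetical example: print at most 16 frame records starting at fp. */
static void walk_frames(struct task_struct *tsk, unsigned long fp)
{
	struct stack_info info;
	int max_depth = 16;

	while (fp && max_depth--) {
		/*
		 * An AArch64 frame record is two longs: the saved frame
		 * pointer followed by the saved link register, so ask
		 * whether 16 bytes at fp sit on a known, accessible stack.
		 */
		if (!on_accessible_stack(tsk, fp, 16, &info))
			break;

		pr_info("fp=%016lx lr=%016lx (stack type %d)\n",
			fp, *(unsigned long *)(fp + 8), info.type);

		/* Follow the saved frame pointer to the previous record. */
		fp = *(unsigned long *)fp;
	}
}

Checking accessibility first is what makes the raw dereferences safe here: a corrupt frame pointer that lands off every known stack terminates the walk instead of faulting.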