cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

hardirq.h (2335B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/threads.h>
#include <asm/barrier.h>
#include <asm/irq.h>
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>

#define ack_bad_irq ack_bad_irq
#include <asm-generic/hardirq.h>

#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1

struct nmi_ctx {
	u64 hcr;
	unsigned int cnt;
};

DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);

#define arch_nmi_enter()						\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	if (___ctx->cnt) {						\
		___ctx->cnt++;						\
		break;							\
	}								\
									\
	___hcr = read_sysreg(hcr_el2);					\
	if (!(___hcr & HCR_TGE)) {					\
		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
		isb();							\
	}								\
	/*								\
	 * Make sure the sysreg write is performed before ___ctx->cnt	\
	 * is set to 1. NMIs that see cnt == 1 will rely on us.		\
	 */								\
	barrier();							\
	___ctx->cnt = 1;						\
	/*								\
	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
	 * don't want ___ctx->hcr to be overwritten.			\
	 */								\
	barrier();							\
	___ctx->hcr = ___hcr;						\
} while (0)

#define arch_nmi_exit()							\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	___hcr = ___ctx->hcr;						\
	/*								\
	 * Make sure we read ___ctx->hcr before we release		\
	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.	\
	 */								\
	barrier();							\
	___ctx->cnt--;							\
	/*								\
	 * Make sure ___ctx->cnt release is visible before we		\
	 * restore the sysreg. Otherwise a new NMI occurring		\
	 * right after write_sysreg() can be fooled and think		\
	 * we secured things for it.					\
	 */								\
	barrier();							\
	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
		write_sysreg(___hcr, hcr_el2);				\
} while (0)

static inline void ack_bad_irq(unsigned int irq)
{
	extern unsigned long irq_err_count;
	irq_err_count++;
}

#endif /* __ASM_HARDIRQ_H */
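
For context, these hooks are consumed by the generic NMI accounting in <linux/hardirq.h>: nmi_enter() invokes arch_nmi_enter() before the handler runs and nmi_exit() invokes arch_nmi_exit() afterwards. Because the per-CPU nmi_contexts entry is only ever touched by code running on the local CPU, a plain compiler barrier() is enough to order the cnt update against the HCR_EL2 write as seen by a nested NMI; no SMP memory barrier is needed. The sketch below is an illustration only (example_nmi_entry is a made-up name, not part of this file) and merely shows the expected pairing of the two hooks.

/*
 * Illustration only: how the hooks pair around an NMI handler.
 * On real NMI entry paths, nmi_enter()/nmi_exit() perform this
 * pairing together with RCU/lockdep bookkeeping.
 */
static void example_nmi_entry(void)
{
	arch_nmi_enter();	/* outermost NMI: force HCR_EL2.TGE when running at EL2 (VHE) */

	/* ... handle the NMI ... */

	arch_nmi_exit();	/* outermost NMI: restore the saved HCR_EL2 value */
}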