cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

internal.h (6254B)


      1/* SPDX-License-Identifier: GPL-2.0 */
      2#ifndef __X86_MCE_INTERNAL_H__
      3#define __X86_MCE_INTERNAL_H__
      4
      5#undef pr_fmt
      6#define pr_fmt(fmt) "mce: " fmt
      7
      8#include <linux/device.h>
      9#include <asm/mce.h>
     10
/*
 * Internal severity grades for machine check errors, as produced by
 * mce_severity(). NOTE(review): the enumerators appear ordered from least
 * to most severe — confirm against the severity grading code before
 * relying on ordering comparisons.
 */
enum severity_level {
	MCE_NO_SEVERITY,
	MCE_DEFERRED_SEVERITY,
	/* UCNA errors are graded exactly like deferred errors. */
	MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY,
	MCE_KEEP_SEVERITY,
	MCE_SOME_SEVERITY,
	MCE_AO_SEVERITY,
	MCE_UC_SEVERITY,
	MCE_AR_SEVERITY,
	MCE_PANIC_SEVERITY,
};
     22
     23extern struct blocking_notifier_head x86_mce_decoder_chain;
     24
     25#define INITIAL_CHECK_INTERVAL	5 * 60 /* 5 minutes */
     26
/*
 * Node wrapping one MCE record for the lock-less llist used by the
 * mce_gen_pool_*() helpers.
 */
struct mce_evt_llist {
	struct llist_node llnode;
	struct mce mce;		/* by-value copy of the logged record */
};
     31
/* Lock-less pool of pending MCE records (see struct mce_evt_llist above). */
void mce_gen_pool_process(struct work_struct *__unused);
bool mce_gen_pool_empty(void);
int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);

/*
 * Grade the severity of record @a; presumably returns an enum
 * severity_level value — confirm against the implementation.
 */
int mce_severity(struct mce *a, struct pt_regs *regs, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);

/* Banks with correctable-error reporting disabled — NOTE(review): inferred from the name. */
extern mce_banks_t mce_banks_ce_disabled;
     42
/*
 * Intel-specific CMCI/LMCE support. With CONFIG_X86_MCE_INTEL=n the
 * fallbacks below compile to no-ops: polling reports nothing to do and
 * intel_filter_mce() filters nothing.
 */
#ifdef CONFIG_X86_MCE_INTEL
unsigned long cmci_intel_adjust_timer(unsigned long interval);
bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
void intel_init_cmci(void);
void intel_init_lmce(void);
void intel_clear_lmce(void);
bool intel_filter_mce(struct mce *m);
#else
/* No CMCI: fall back to the default timer adjustment. */
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
static inline void intel_init_cmci(void) { }
static inline void intel_init_lmce(void) { }
static inline void intel_clear_lmce(void) { }
static inline bool intel_filter_mce(struct mce *m) { return false; }
#endif
     62
/* Rearm the MCE timer with a new @interval. */
void mce_timer_kick(unsigned long interval);

/*
 * ACPI APEI persistence of MCE records. With CONFIG_ACPI_APEI=n,
 * write/clear fail with -EINVAL while read/check return 0 (nothing
 * stored).
 */
#ifdef CONFIG_ACPI_APEI
int apei_write_mce(struct mce *m);
ssize_t apei_read_mce(struct mce *m, u64 *record_id);
int apei_check_mce(void);
int apei_clear_mce(u64 record_id);
#else
static inline int apei_write_mce(struct mce *m)
{
	return -EINVAL;
}
static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
{
	return 0;
}
static inline int apei_check_mce(void)
{
	return 0;
}
static inline int apei_clear_mce(u64 record_id)
{
	return -EINVAL;
}
#endif
     88
     89/*
     90 * We consider records to be equivalent if bank+status+addr+misc all match.
     91 * This is only used when the system is going down because of a fatal error
     92 * to avoid cluttering the console log with essentially repeated information.
     93 * In normal processing all errors seen are logged.
     94 */
     95static inline bool mce_cmp(struct mce *m1, struct mce *m2)
     96{
     97	return m1->bank != m2->bank ||
     98		m1->status != m2->status ||
     99		m1->addr != m2->addr ||
    100		m1->misc != m2->misc;
    101}
    102
/* sysfs "trigger" attribute — NOTE(review): tied to the legacy mcelog path; confirm. */
extern struct device_attribute dev_attr_trigger;

/*
 * Legacy mcelog interface hooks and the error-injector notifier chain;
 * all no-ops when CONFIG_X86_MCELOG_LEGACY=n.
 */
#ifdef CONFIG_X86_MCELOG_LEGACY
void mce_work_trigger(void);
void mce_register_injector_chain(struct notifier_block *nb);
void mce_unregister_injector_chain(struct notifier_block *nb);
#else
static inline void mce_work_trigger(void)	{ }
static inline void mce_register_injector_chain(struct notifier_block *nb)	{ }
static inline void mce_unregister_injector_chain(struct notifier_block *nb)	{ }
#endif
    114
/*
 * Collected boot/runtime configuration of the MCA subsystem (single
 * global instance: mca_cfg below).
 */
struct mca_config {
	__u64 lmce_disabled		: 1,
	      disabled			: 1,
	      ser			: 1,
	      recovery			: 1,
	      bios_cmci_threshold	: 1,
	      /* Proper #MC exception handler is set */
	      initialized		: 1,
	      __reserved		: 58;

	bool dont_log_ce;
	bool cmci_disabled;
	bool ignore_ce;
	bool print_all;

	int monarch_timeout;
	int panic_timeout;
	u32 rip_msr;	/* MSR holding the IP of the error — TODO confirm */
	s8 bootlog;
};

extern struct mca_config mca_cfg;
/* Per-CPU count of MCA banks (can differ between CPUs). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
    138
/*
 * Vendor capability and quirk bits detected at boot (single global
 * instance: mce_flags below).
 */
struct mce_vendor_flags {
	/*
	 * Indicates that overflow conditions are not fatal, when set.
	 */
	__u64 overflow_recov	: 1,

	/*
	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
	 * Recovery. It indicates support for data poisoning in HW and deferred
	 * error interrupts.
	 */
	succor			: 1,

	/*
	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
	 * the register space for each MCA bank and also increases number of
	 * banks. Also, to accommodate the new banks and registers, the MCA
	 * register space is moved to a new MSR range.
	 */
	smca			: 1,

	/* AMD-style error thresholding banks present. */
	amd_threshold		: 1,

	/* Pentium, family 5-style MCA */
	p5			: 1,

	/* Centaur Winchip C6-style MCA */
	winchip			: 1,

	/* SandyBridge IFU quirk */
	snb_ifu_quirk		: 1,

	/* Skylake, Cascade Lake, Cooper Lake REP;MOVS* quirk */
	skx_repmov_quirk	: 1,

	__reserved_0		: 56;
};

extern struct mce_vendor_flags mce_flags;
    179
/*
 * Logical per-bank MCA register names; translated to concrete MSR
 * numbers by mca_msr_reg().
 */
enum mca_msr {
	MCA_CTL,
	MCA_STATUS,
	MCA_ADDR,
	MCA_MISC,
};

/* Decide whether to add MCE record to MCE event pool or filter it out. */
extern bool filter_mce(struct mce *m);

/* AMD-specific filtering; without CONFIG_X86_MCE_AMD nothing is filtered. */
#ifdef CONFIG_X86_MCE_AMD
extern bool amd_filter_mce(struct mce *m);
#else
static inline bool amd_filter_mce(struct mce *m) { return false; }
#endif
    195
/*
 * Ancient machine check support: Pentium family-5 (P5) and Centaur
 * Winchip. All stubs are no-ops when CONFIG_X86_ANCIENT_MCE=n.
 */
#ifdef CONFIG_X86_ANCIENT_MCE
void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
void winchip_mcheck_init(struct cpuinfo_x86 *c);
noinstr void pentium_machine_check(struct pt_regs *regs);
noinstr void winchip_machine_check(struct pt_regs *regs);
/* Sets the (externally defined) mce_p5_enabled flag. */
static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
#else
static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
static inline void enable_p5_mce(void) {}
static inline void pentium_machine_check(struct pt_regs *regs) {}
static inline void winchip_machine_check(struct pt_regs *regs) {}
#endif

/* MSR read helper; noinstr — NOTE(review): likely callable from the #MC entry path, confirm. */
noinstr u64 mce_rdmsrl(u32 msr);
    211
    212static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
    213{
    214	if (mce_flags.smca) {
    215		switch (reg) {
    216		case MCA_CTL:	 return MSR_AMD64_SMCA_MCx_CTL(bank);
    217		case MCA_ADDR:	 return MSR_AMD64_SMCA_MCx_ADDR(bank);
    218		case MCA_MISC:	 return MSR_AMD64_SMCA_MCx_MISC(bank);
    219		case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
    220		}
    221	}
    222
    223	switch (reg) {
    224	case MCA_CTL:	 return MSR_IA32_MCx_CTL(bank);
    225	case MCA_ADDR:	 return MSR_IA32_MCx_ADDR(bank);
    226	case MCA_MISC:	 return MSR_IA32_MCx_MISC(bank);
    227	case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
    228	}
    229
    230	return 0;
    231}
    232
    233#endif /* __X86_MCE_INTERNAL_H__ */