cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

perf_regs.c (4641B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016 Anju T, IBM Corporation.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/stddef.h>
#include <asm/ptrace.h>
#include <asm/perf_regs.h>

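/*
 * Mask of extended (PMU) registers exposed to perf on this platform;
 * filled in by the platform PMU init code.
 */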
u64 PERF_REG_EXTENDED_MASK;

#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)

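/* Bits that are not valid in a user-supplied sample register mask. */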
#define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK))

static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
	PT_REGS_OFFSET(PERF_REG_POWERPC_R0,  gpr[0]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R1,  gpr[1]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R2,  gpr[2]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R3,  gpr[3]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R4,  gpr[4]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R5,  gpr[5]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R6,  gpr[6]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R7,  gpr[7]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R8,  gpr[8]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R9,  gpr[9]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R10, gpr[10]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R11, gpr[11]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R12, gpr[12]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R13, gpr[13]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R14, gpr[14]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R15, gpr[15]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R16, gpr[16]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R17, gpr[17]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R18, gpr[18]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R19, gpr[19]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R20, gpr[20]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R21, gpr[21]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R22, gpr[22]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R23, gpr[23]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R24, gpr[24]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R25, gpr[25]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R26, gpr[26]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R27, gpr[27]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R28, gpr[28]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R29, gpr[29]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R30, gpr[30]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R31, gpr[31]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_NIP, nip),
	PT_REGS_OFFSET(PERF_REG_POWERPC_MSR, msr),
	PT_REGS_OFFSET(PERF_REG_POWERPC_ORIG_R3, orig_gpr3),
	PT_REGS_OFFSET(PERF_REG_POWERPC_CTR, ctr),
	PT_REGS_OFFSET(PERF_REG_POWERPC_LINK, link),
	PT_REGS_OFFSET(PERF_REG_POWERPC_XER, xer),
	PT_REGS_OFFSET(PERF_REG_POWERPC_CCR, ccr),
#ifdef CONFIG_PPC64
	PT_REGS_OFFSET(PERF_REG_POWERPC_SOFTE, softe),
#else
	PT_REGS_OFFSET(PERF_REG_POWERPC_SOFTE, mq),
#endif
	PT_REGS_OFFSET(PERF_REG_POWERPC_TRAP, trap),
	PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
	PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
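	/*
	 * On book3s the perf interrupt code (perf_read_regs()) stashes
	 * SIER in regs->dar and MMCRA in regs->dsisr, so these two
	 * entries deliberately alias the dar/dsisr offsets.
	 */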
	PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
	PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};

/* Function to return the extended register values */
static u64 get_ext_regs_value(int idx)
{
	switch (idx) {
	case PERF_REG_POWERPC_PMC1 ... PERF_REG_POWERPC_PMC6:
		return get_pmcs_ext_regs(idx - PERF_REG_POWERPC_PMC1);
	case PERF_REG_POWERPC_MMCR0:
		return mfspr(SPRN_MMCR0);
	case PERF_REG_POWERPC_MMCR1:
		return mfspr(SPRN_MMCR1);
	case PERF_REG_POWERPC_MMCR2:
		return mfspr(SPRN_MMCR2);
#ifdef CONFIG_PPC64
	case PERF_REG_POWERPC_MMCR3:
		return mfspr(SPRN_MMCR3);
	case PERF_REG_POWERPC_SIER2:
		return mfspr(SPRN_SIER2);
	case PERF_REG_POWERPC_SIER3:
		return mfspr(SPRN_SIER3);
	case PERF_REG_POWERPC_SDAR:
		return mfspr(SPRN_SDAR);
#endif
	case PERF_REG_POWERPC_SIAR:
		return mfspr(SPRN_SIAR);
	default: return 0;
	}
}

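/*
 * Resolve one sampled register: SIER/MMCRA are not available on
 * 32-bit or Freescale embedded, extended indices are read live via
 * get_ext_regs_value(), everything else comes out of pt_regs.
 */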
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	if (idx == PERF_REG_POWERPC_SIER &&
	   (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
	    IS_ENABLED(CONFIG_PPC32) ||
	    !is_sier_available()))
		return 0;

	if (idx == PERF_REG_POWERPC_MMCRA &&
	   (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
	    IS_ENABLED(CONFIG_PPC32)))
		return 0;

	if (idx >= PERF_REG_POWERPC_MAX && idx < PERF_REG_EXTENDED_MAX)
		return get_ext_regs_value(idx);

	/*
	 * If idx refers to a register beyond the supported set,
	 * return 0 with a warning.
	 */
	if (WARN_ON_ONCE(idx >= PERF_REG_EXTENDED_MAX))
		return 0;

	return regs_get_register(regs, pt_regs_offset[idx]);
}

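/* Reject register masks that request bits outside the supported set. */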
int perf_reg_validate(u64 mask)
{
	if (!mask || mask & REG_RESERVED)
		return -EINVAL;
	return 0;
}

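/* Report whether the sampled task uses the 32-bit or 64-bit register ABI. */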
u64 perf_reg_abi(struct task_struct *task)
{
	if (is_tsk_32bit_task(task))
		return PERF_SAMPLE_REGS_ABI_32;
	else
		return PERF_SAMPLE_REGS_ABI_64;
}

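/*
 * User-space registers are the ones saved at kernel entry for the
 * current task; report ABI_NONE when there is no user context
 * (e.g. a kernel thread).
 */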
void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs)
{
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
			 PERF_SAMPLE_REGS_ABI_NONE;
}