cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

pervasive.c (2783B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CBE Pervasive Monitor and Debug
 *
 * (C) Copyright IBM Corporation 2005
 *
 * Authors: Maximino Aguilar (maguilar@us.ibm.com)
 *          Michael N. Day (mnday@us.ibm.com)
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
#include <linux/pgtable.h>

#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/reg.h>
#include <asm/cell-regs.h>
#include <asm/cpu_has_feature.h>

#include "pervasive.h"
#include "ras.h"

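/*
 * Idle hook for the Cell BE: program the wake-up conditions in the
 * thread switch control register, then stop the current hardware
 * thread by clearing its run latch and thread enable bits.  With
 * Pause(0) enabled in the PMD (see cbe_pervasive_init() below), this
 * puts the thread into the Pause(0) power-save state; wake-up is
 * delivered as a system reset exception and decoded in
 * cbe_system_reset_exception().
 */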
static void cbe_power_save(void)
{
	unsigned long ctrl, thread_switch_control;

	/* Ensure our interrupt state is properly tracked */
	if (!prep_irq_for_idle())
		return;

	ctrl = mfspr(SPRN_CTRLF);

	/* Enable DEC and EE interrupt request */
	thread_switch_control  = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	switch (ctrl & CTRL_CT) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
			__func__);
		break;
	}
	mtspr(SPRN_TSC_CELL, thread_switch_control);

	/*
	 * go into low thread priority, medium priority will be
	 * restored for us after wake-up.
	 */
	HMT_low();

	/*
	 * atomically disable thread execution and runlatch.
	 * External and Decrementer exceptions are still handled when the
	 * thread is disabled but now enter in cbe_system_reset_exception()
	 */
	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
	mtspr(SPRN_CTRLT, ctrl);

	/* Re-enable interrupts in MSR */
	__hard_irq_enable();
}

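/*
 * Wake-up from the power-save state above arrives as a system reset
 * exception.  Decode the wake reason from SRR1: return 1 if the event
 * was a power-save wake-up that has been (or will be) handled, or 0 to
 * let the generic code treat it as a real system reset.
 */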
static int cbe_system_reset_exception(struct pt_regs *regs)
{
	switch (regs->msr & SRR1_WAKEMASK) {
	case SRR1_WAKEDEC:
		set_dec(1);
		break;
	case SRR1_WAKEEE:
		/*
		 * Handle these when interrupts get re-enabled and we take
		 * them as regular exceptions. We are in an NMI context
		 * and can't handle these here.
		 */
		break;
	case SRR1_WAKEMT:
		return cbe_sysreset_hack();
#ifdef CONFIG_CBE_RAS
	case SRR1_WAKESYSERR:
		cbe_system_error_exception(regs);
		break;
	case SRR1_WAKETHERM:
		cbe_thermal_exception(regs);
		break;
#endif /* CONFIG_CBE_RAS */
	default:
		/* do system reset */
		return 0;
	}
	/* everything handled */
	return 1;
}

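/*
 * Enable the Pause(0) control bit in each CPU's pervasive (PMD)
 * register block and hook up the Cell-specific power-save and
 * system-reset handlers.
 */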
void __init cbe_pervasive_init(void)
{
	int cpu;

	if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO))
		return;

	for_each_possible_cpu(cpu) {
		struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu);
		if (!regs)
			continue;

		 /* Enable Pause(0) control bit */
		out_be64(&regs->pmcr, in_be64(&regs->pmcr) |
					    CBE_PMD_PAUSE_ZERO_CONTROL);
	}

	ppc_md.power_save = cbe_power_save;
	ppc_md.system_reset_exception = cbe_system_reset_exception;
}