cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

icp-hv.c (3840B)


      1// SPDX-License-Identifier: GPL-2.0-or-later
      2/*
      3 * Copyright 2011 IBM Corporation.
      4 */
      5#include <linux/types.h>
      6#include <linux/kernel.h>
      7#include <linux/irq.h>
      8#include <linux/smp.h>
      9#include <linux/interrupt.h>
     10#include <linux/irqdomain.h>
     11#include <linux/cpu.h>
     12#include <linux/of.h>
     13
     14#include <asm/smp.h>
     15#include <asm/irq.h>
     16#include <asm/errno.h>
     17#include <asm/xics.h>
     18#include <asm/io.h>
     19#include <asm/hvcall.h>
     20
     21static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
     22{
     23	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
     24	long rc;
     25	unsigned int ret = XICS_IRQ_SPURIOUS;
     26
     27	rc = plpar_hcall(H_XIRR, retbuf, cppr);
     28	if (rc == H_SUCCESS) {
     29		ret = (unsigned int)retbuf[0];
     30	} else {
     31		pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
     32			__func__, cppr, rc);
     33		WARN_ON_ONCE(1);
     34	}
     35
     36	return ret;
     37}
     38
     39static inline void icp_hv_set_cppr(u8 value)
     40{
     41	long rc = plpar_hcall_norets(H_CPPR, value);
     42	if (rc != H_SUCCESS) {
     43		pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
     44			__func__, value, rc);
     45		WARN_ON_ONCE(1);
     46	}
     47}
     48
/*
 * Write the XIRR, i.e. issue an EOI, via the H_EOI hcall.
 * The value combines a CPPR in the top byte with the interrupt
 * source number in the low 24 bits (see icp_hv_eoi()).
 */
static inline void icp_hv_set_xirr(unsigned int value)
{
	long rc = plpar_hcall_norets(H_EOI, value);
	if (rc != H_SUCCESS) {
		pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
			__func__, value, rc);
		WARN_ON_ONCE(1);
		/*
		 * H_EOI failed; explicitly set the CPPR from the top byte
		 * of the XIRR value — presumably what the hypervisor would
		 * have done on a successful EOI. TODO(review): confirm
		 * against PAPR H_EOI semantics.
		 */
		icp_hv_set_cppr(value >> 24);
	}
}
     59
     60static inline void icp_hv_set_qirr(int n_cpu , u8 value)
     61{
     62	int hw_cpu = get_hard_smp_processor_id(n_cpu);
     63	long rc;
     64
     65	/* Make sure all previous accesses are ordered before IPI sending */
     66	mb();
     67	rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
     68	if (rc != H_SUCCESS) {
     69		pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
     70			"returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
     71		WARN_ON_ONCE(1);
     72	}
     73}
     74
/*
 * irqchip ->eoi callback: signal end-of-interrupt for the hw irq
 * behind irq_data d, restoring the CPPR popped from the xics stack.
 */
static void icp_hv_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	/*
	 * NOTE(review): iosync() before the EOI — presumably to order any
	 * preceding device MMIO ahead of retiring the interrupt; confirm.
	 */
	iosync();
	/* XIRR format: previous CPPR in bits 31-24, source number below */
	icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
}
     82
     83static void icp_hv_teardown_cpu(void)
     84{
     85	int cpu = smp_processor_id();
     86
     87	/* Clear any pending IPI */
     88	icp_hv_set_qirr(cpu, 0xff);
     89}
     90
     91static void icp_hv_flush_ipi(void)
     92{
     93	/* We take the ipi irq but and never return so we
     94	 * need to EOI the IPI, but want to leave our priority 0
     95	 *
     96	 * should we check all the other interrupts too?
     97	 * should we be flagging idle loop instead?
     98	 * or creating some task to be scheduled?
     99	 */
    100
    101	icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
    102}
    103
/*
 * irqchip ->get_irq callback: fetch the next pending interrupt from
 * the hypervisor and translate it to a Linux virq. Returns 0 when
 * nothing (usable) is pending.
 */
static unsigned int icp_hv_get_irq(void)
{
	unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
	/* Low 24 bits of the XIRR are the interrupt source number */
	unsigned int vec = xirr & 0x00ffffff;
	unsigned int irq;

	if (vec == XICS_IRQ_SPURIOUS)
		return 0;

	irq = irq_find_mapping(xics_host, vec);
	if (likely(irq)) {
		/* Remember the CPPR we are running at for the later EOI */
		xics_push_cppr(vec);
		return irq;
	}

	/* We don't have a linux mapping, so have rtas mask it. */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	icp_hv_set_xirr(xirr);

	return 0;
}
    127
/*
 * irqchip ->set_priority callback: update this CPU's base CPPR in the
 * xics layer, then push the new value to the hypervisor.
 */
static void icp_hv_set_cpu_priority(unsigned char cppr)
{
	xics_set_base_cppr(cppr);
	icp_hv_set_cppr(cppr);
	/* NOTE(review): iosync() after the hcall — ordering barrier; confirm intent */
	iosync();
}
    134
    135#ifdef CONFIG_SMP
    136
/* Trigger an IPI on the given CPU by writing IPI_PRIORITY to its MFRR. */
static void icp_hv_cause_ipi(int cpu)
{
	icp_hv_set_qirr(cpu, IPI_PRIORITY);
}
    141
    142static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
    143{
    144	int cpu = smp_processor_id();
    145
    146	icp_hv_set_qirr(cpu, 0xff);
    147
    148	return smp_ipi_demux();
    149}
    150
    151#endif /* CONFIG_SMP */
    152
/* ICP backend ops for running under a hypervisor (hcall-based access). */
static const struct icp_ops icp_hv_ops = {
	.get_irq	= icp_hv_get_irq,
	.eoi		= icp_hv_eoi,
	.set_priority	= icp_hv_set_cpu_priority,
	.teardown_cpu	= icp_hv_teardown_cpu,
	.flush_ipi	= icp_hv_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action	= icp_hv_ipi_action,
	.cause_ipi	= icp_hv_cause_ipi,
#endif
};
    164
    165int __init icp_hv_init(void)
    166{
    167	struct device_node *np;
    168
    169	np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
    170	if (!np)
    171		np = of_find_node_by_type(NULL,
    172				    "PowerPC-External-Interrupt-Presentation");
    173	if (!np)
    174		return -ENODEV;
    175
    176	icp_ops = &icp_hv_ops;
    177
    178	of_node_put(np);
    179	return 0;
    180}
    181