cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mcip.c (10603B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

static DEFINE_RAW_SPINLOCK(mcip_lock);

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];
/*
 * Set mask to halt GFRC if any online core in SMP cluster is halted.
 * Only works for ARC HS v3.0+; it has no effect on earlier versions.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
	struct bcr_generic gfrc;
	unsigned long flags;
	u32 gfrc_halt_mask;

	READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

	/*
	 * The CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
	 * GFRC version 0x3.
	 */
	if (gfrc.ver < 0x3)
		return;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
	gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
	gfrc_halt_mask |= BIT(cpu);
	__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_update_debug_halt_mask(int cpu)
{
	u32 mcip_mask = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * mcip_mask is the same for the CMD_DEBUG_SET_SELECT and
	 * CMD_DEBUG_SET_MASK commands, so read it once instead of reading both
	 * CMD_DEBUG_READ_MASK and CMD_DEBUG_READ_SELECT.
	 */
	__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
	mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);

	mcip_mask |= BIT(cpu);

	__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
	/*
	 * The parameter specifies the halt cause:
	 * STATUS32[H]/actionpoint/breakpoint/self-halt
	 * We choose all of them (0xF).
	 */
	__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_setup_per_cpu(int cpu)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	smp_ipi_irq_setup(cpu, IPI_IRQ);
	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

	/* Update the GFRC halt mask as a new CPU comes online */
	if (mp.gfrc)
		mcip_update_gfrc_halt_mask(cpu);

	/* Update the MCIP debug mask as a new CPU comes online */
	if (mp.dbg)
		mcip_update_debug_halt_mask(cpu);
}

static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send IPIs to other cores */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * If the receiver already has a pending interrupt, elide sending this
	 * one. Linux cross-core calling works fine with concurrent IPIs
	 * coalesced into one; see arch/arc/kernel/smp.c: ipi_send_msg_one().
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In rare cases, multiple concurrent IPIs sent to the same target can
	 * be coalesced by MCIP into one asserted IRQ, so @cpu can be
	 * "vectored" (multiple bits set) as opposed to the typical single bit.
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void mcip_probe_n_setup(void)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.gfrc, "GFRC"));

	cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
}

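/*
 * These callbacks are consumed by the generic ARC SMP code (see
 * arch/arc/kernel/smp.c) for early probing, per-CPU bring-up and
 * cross-core IPI send/clear.
 */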
struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,
	.init_per_cpu	= mcip_setup_per_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};

#endif

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  -dynamic routing (IRQ affinity)
 *  -load balancing (Round Robin interrupt distribution)
 *  -1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
			 bool set_distr, unsigned int distr)
{
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
	if (set_distr)
		data.distr = distr;
	if (set_lvl)
		data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}

static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
	idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_ack(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask_ack(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;
	unsigned int destination_bits;
	unsigned int distribution_mode;

	/* Error out if there is no online CPU in @cpumask */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	destination_bits = cpumask_bits(&online)[0];
	idu_set_dest(data->hwirq, destination_bits);

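	/*
	 * ffs() == fls() means exactly one destination bit is set: deliver
	 * the IRQ directly to that core, otherwise distribute it round-robin
	 * across the selected cores.
	 */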
	if (ffs(destination_bits) == fls(destination_bits))
		distribution_mode = IDU_M_DISTRI_DEST;
	else
		distribution_mode = IDU_M_DISTRI_RR;

	idu_set_mode(data->hwirq, false, 0, true, distribution_mode);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}

static int idu_irq_set_type(struct irq_data *data, u32 type)
{
	unsigned long flags;

	/*
	 * ARCv2 IDU HW does not support inverse polarity, so these are the
	 * only interrupt types supported.
	 */
	if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	idu_set_mode(data->hwirq, true,
		     type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
						   IDU_M_TRIG_LEVEL,
		     false, 0);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return 0;
}

static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in the IDU must be set manually
	 * since in some cases the kernel will not call irq_set_affinity() by
	 * itself:
	 *   1. When the kernel is not configured with SMP support.
	 *   2. When the kernel is configured with SMP support, but the upper
	 *      interrupt controllers do not support setting the affinity and
	 *      cannot propagate it to the IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
	.irq_ack		= idu_irq_ack,
	.irq_mask_ack		= idu_irq_mask_ack,
	.irq_enable		= idu_irq_enable,
	.irq_set_type		= idu_irq_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif
};

static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	struct irq_chip *core_chip = irq_desc_get_chip(desc);
	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
	irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

	chained_irq_enter(core_chip, desc);
	generic_handle_domain_irq(idu_domain, idu_hwirq);
	chained_irq_exit(core_chip, desc);
}

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= irq_domain_xlate_onetwocell,
	.map	= idu_irq_map,
};

/*
 * [16, 23]: Statically assigned, always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0 then "C" common IRQs
 * [24+C, N]: Not statically assigned, private-per-core
 */
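/*
 * E.g. with C = 8 common IRQs, core intc lines 24..31 act as the IDU uplinks;
 * idu_cascade_isr() recovers the IDU hwirq as core_hwirq - FIRST_EXT_IRQ, so
 * core IRQ 24 corresponds to IDU common IRQ 0.
 */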


static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	int nr_irqs;
	int i, virq;
	struct mcip_bcr mp;
	struct mcip_idu_bcr idu_bcr;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */

	for (i = 0; i < nr_irqs; i++) {
		/* Mask all common interrupts by default */
		idu_irq_mask_raw(i);

		/*
		 * Map parent uplink IRQs (towards core intc) 24, 25, ...
		 * This has already been done, but we need the mapping here to
		 * get the parent virq and install the IDU cascade handler as
		 * the first-level ISR.
		 */
		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
		BUG_ON(!virq);
		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
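/*
 * Registers idu_of_init() for the "snps,archs-idu-intc" compatible string so
 * the IDU is probed and set up from the device tree during early irqchip
 * initialization.
 */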
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);