cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

irq-gic.c (44452B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#ifdef CONFIG_ARM64
#include <asm/cpufeature.h>

static void gic_check_cpu_features(void)
{
	WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
			TAINT_CPU_OUT_OF_SPEC,
			"GICv3 system registers enabled, broken firmware!\n");
}
#else
#define gic_check_cpu_features()	do { } while (0)
#endif

union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
	void __iomem *raw_dist_base;
	void __iomem *raw_cpu_base;
	u32 percpu_offset;
#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_active;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
};

#ifdef CONFIG_BL_SWITCHER

static DEFINE_RAW_SPINLOCK(cpu_map_lock);

#define gic_lock_irqsave(f)		\
	raw_spin_lock_irqsave(&cpu_map_lock, (f))
#define gic_unlock_irqrestore(f)	\
	raw_spin_unlock_irqrestore(&cpu_map_lock, (f))

#define gic_lock()			raw_spin_lock(&cpu_map_lock)
#define gic_unlock()			raw_spin_unlock(&cpu_map_lock)

#else

#define gic_lock_irqsave(f)		do { (void)(f); } while (0)
#define gic_unlock_irqrestore(f)	do { (void)(f); } while (0)

#define gic_lock()			do { } while (0)
#define gic_unlock()			do { } while (0)

#endif

static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering.  Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;

static struct gic_kvm_info gic_v2_kvm_info __initdata;

static DEFINE_PER_CPU(u32, sgi_intid);

#ifdef CONFIG_GIC_NON_BANKED
static DEFINE_STATIC_KEY_FALSE(frankengic_key);

static void enable_frankengic(void)
{
	static_branch_enable(&frankengic_key);
}

static inline void __iomem *__get_base(union gic_base *base)
{
	if (static_branch_unlikely(&frankengic_key))
		return raw_cpu_read(*base->percpu_base);

	return base->common_base;
}

#define gic_data_dist_base(d)	__get_base(&(d)->dist_base)
#define gic_data_cpu_base(d)	__get_base(&(d)->cpu_base)
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define enable_frankengic()	do { } while (0)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline bool cascading_gic_irq(struct irq_data *d)
{
	void *data = irq_data_get_irq_handler_data(d);

	/*
	 * If handler_data is set, this is a cascading interrupt, and
	 * it cannot possibly be forwarded.
	 */
	return data != NULL;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}
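
/*
 * Worked example for the poke/peek math above (illustrative): hwirq 45
 * lands in register word 45 / 32 = 1 (byte offset +4) at bit position
 * 45 % 32 = 13, so gic_poke_irq(d, GIC_DIST_ENABLE_SET) writes BIT(13)
 * to GIC_DIST_ENABLE_SET + 4.
 */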

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (the guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}

static void gic_eoi_irq(struct irq_data *d)
{
	u32 hwirq = gic_irq(d);

	if (hwirq < 16)
		hwirq = this_cpu_read(sgi_intid);

	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_EOI);
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	u32 hwirq = gic_irq(d);

	/* Do not deactivate an IRQ forwarded to a vcpu. */
	if (irqd_is_forwarded_to_vcpu(d))
		return;

	if (hwirq < 16)
		hwirq = this_cpu_read(sgi_intid);

	writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				      enum irqchip_irq_state which, bool *val)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	int ret;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
			    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL);
	if (ret && gicirq < 32) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16);
		ret = 0;
	}

	return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
	if (cascading_gic_irq(d) || gic_irq(d) < 16)
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

static int gic_retrigger(struct irq_data *data)
{
	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}

static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (unlikely(irqnr >= 1020))
			break;

		if (static_branch_likely(&supports_deactivate_key))
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
		isb();

		/*
		 * Ensure any shared data written by the CPU sending the IPI
		 * is read after we've read the ACK register on the GIC.
		 *
		 * Pairs with the write barrier in gic_ipi_send_mask
		 */
		if (irqnr <= 15) {
			smp_rmb();

			/*
			 * The GIC encodes the source CPU in GICC_IAR,
			 * causing deactivation to fail unless the value
			 * is written back as-is to GICC_EOI.  Stash the
			 * INTID away for gic_eoi_irq() to write back.
			 * This only works because we don't nest SGIs...
			 */
			this_cpu_write(sgi_intid, irqstat);
		}

		generic_handle_domain_irq(gic->domain, irqnr);
	} while (1);
}
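
/*
 * Note on GICC_IAR (illustrative, per the GICv2 architecture spec):
 * bits [9:0] hold the interrupt ID and, for SGIs, bits [12:10] hold
 * the source CPU interface. That is why the full irqstat value, not
 * just irqnr, is stashed in sgi_intid and later written to GICC_EOI.
 */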

static void gic_handle_cascade_irq(struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int gic_irq;
	unsigned long status;
	int ret;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	isb();
	ret = generic_handle_domain_irq(chip_data->domain, gic_irq);
	if (unlikely(ret))
		handle_bad_irq(desc);
 out:
	chained_irq_exit(chip, desc);
}

static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);

	if (gic->domain->dev)
		seq_printf(p, "%s", gic->domain->dev->of_node->name);
	else
		seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0]));
}

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
					 &gic_data[gic_nr]);
}

static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
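
/*
 * Background (per the GICv2 spec): GICD_ITARGETSR0-7, covering
 * interrupts 0-31, are read-only and banked per CPU, so each target
 * byte reads back the interface mask of the accessing CPU. The loop
 * above ORs the four byte lanes of each word together; a CPU on
 * interface 2 would thus end up with mask 0x04.
 */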

static bool gic_check_gicv2(void __iomem *base)
{
	u32 val = readl_relaxed(base + GIC_CPU_IDENT);
	return (val & 0xff0fff) == 0x02043B;
}
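
/*
 * GICC_IIDR layout, as far as this check relies on it (an assumption
 * from the GICv2 spec): bits [11:0] Implementer, [15:12] Revision,
 * [19:16] Architecture version. The 0xff0fff mask therefore accepts
 * "architecture version 2, implementer 0x43B (Arm)" regardless of the
 * Revision field.
 */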

static void gic_cpu_if_up(struct gic_chip_data *gic)
{
	void __iomem *cpu_base = gic_data_cpu_base(gic);
	u32 bypass = 0;
	u32 mode = 0;
	int i;

	if (gic == &gic_data[0] && static_branch_likely(&supports_deactivate_key))
		mode = GIC_CPU_CTRL_EOImodeNS;

	if (gic_check_gicv2(cpu_base))
		for (i = 0; i < 4; i++)
			writel_relaxed(0, cpu_base + GIC_CPU_ACTIVEPRIO + i * 4);

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}

static void gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}
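
/*
 * Example of the cpumask replication above (illustrative): an interface
 * mask of 0x01 becomes 0x01010101, i.e. one target byte per interrupt,
 * so a single 32-bit write to a GICD_ITARGETSRn register routes four
 * SPIs to this CPU at once.
 */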

static int gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Setting up the CPU map is only relevant for the primary GIC
	 * because any nested/secondary GICs do not directly interface
	 * with the CPU(s).
	 */
	if (gic == &gic_data[0]) {
		/*
		 * Get what the GIC says our CPU mask is.
		 */
		if (WARN_ON(cpu >= NR_GIC_CPU_IF))
			return -EINVAL;

		gic_check_cpu_features();
		cpu_mask = gic_get_cpumask(gic);
		gic_cpu_map[cpu] = cpu_mask;

		/*
		 * Clear our mask from the other map entries in case they're
		 * still undefined.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~cpu_mask;
	}

	gic_cpu_config(dist_base, 32, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);

	return 0;
}

int gic_cpu_if_down(unsigned int gic_nr)
{
	void __iomem *cpu_base;
	u32 val = 0;

	if (gic_nr >= CONFIG_ARM_GIC_MAX_NR)
		return -EINVAL;

	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);

	return 0;
}

#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
void gic_dist_save(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic->saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic->saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_active[i] =
			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
void gic_dist_restore(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic->saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic->saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_active[i],
			dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}

void gic_cpu_save(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

void gic_cpu_restore(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
					dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(&gic_data[i]);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(&gic_data[i]);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

static int gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_enable))
		return -ENOMEM;

	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_active))
		goto free_ppi_enable;

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_conf))
		goto free_ppi_active;

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);

	return 0;

free_ppi_active:
	free_percpu(gic->saved_ppi_active);
free_ppi_enable:
	free_percpu(gic->saved_ppi_enable);

	return -ENOMEM;
}
#else
static int gic_pm_init(struct gic_chip_data *gic)
{
	return 0;
}
#endif

#ifdef CONFIG_SMP
static void rmw_writeb(u8 bval, void __iomem *addr)
{
	static DEFINE_RAW_SPINLOCK(rmw_lock);
	unsigned long offset = (unsigned long)addr & 3UL;
	unsigned long shift = offset * 8;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&rmw_lock, flags);

	addr -= offset;
	val = readl_relaxed(addr);
	val &= ~GENMASK(shift + 7, shift);
	val |= bval << shift;
	writel_relaxed(val, addr);

	raw_spin_unlock_irqrestore(&rmw_lock, flags);
}
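
/*
 * Example (illustrative): for an addr whose low two bits are 3,
 * offset = 3 and shift = 24, so the byte is merged into lane 3 of the
 * containing word via a 32-bit read-modify-write instead of the sub-word
 * writeb that the broken interconnect cannot handle.
 */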

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
	unsigned int cpu;

	if (unlikely(gic != &gic_data[0]))
		return -EINVAL;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	if (static_branch_unlikely(&needs_rmw_access))
		rmw_writeb(gic_cpu_map[cpu], reg);
	else
		writeb_relaxed(gic_cpu_map[cpu], reg);
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}

static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;
	unsigned long flags, map = 0;

	if (unlikely(nr_cpu_ids == 1)) {
		/* Only one CPU? let's do a self-IPI... */
		writel_relaxed(2 << 24 | d->hwirq,
			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
		return;
	}

	gic_lock_irqsave(flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	gic_unlock_irqrestore(flags);
}
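
/*
 * GIC_DIST_SOFTINT (GICD_SGIR) encoding used above, per the GICv2 spec:
 * bits [25:24] are the target list filter (0b10 == "this CPU only",
 * hence "2 << 24" for the self-IPI case), bits [23:16] the CPU target
 * list and bits [3:0] the SGI number - which is why the multi-CPU path
 * writes "map << 16 | d->hwirq".
 */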

static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init(&gic_data[0]);
	return 0;
}

static __init void gic_smp_init(void)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data[0].domain->fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gic:starting",
				  gic_starting_cpu, NULL);

	base_sgi = __irq_domain_alloc_irqs(gic_data[0].domain, -1, 8,
					   NUMA_NO_NODE, &sgi_fwspec,
					   false, NULL);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}
#else
#define gic_smp_init()		do { } while (0)
#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#endif

static const struct irq_chip gic_chip = {
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger          = gic_retrigger,
	.irq_set_affinity	= gic_set_affinity,
	.ipi_send_mask		= gic_ipi_send_mask,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_print_chip		= gic_irq_print_chip,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static const struct irq_chip gic_chip_mode1 = {
	.name			= "GICv2",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger          = gic_retrigger,
	.irq_set_affinity	= gic_set_affinity,
	.ipi_send_mask		= gic_ipi_send_mask,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
 * is also updated.  Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	gic_lock();

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
		}
	}

	gic_unlock();

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
						dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}
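
/*
 * The ror32() above moves each per-byte target bit from position
 * cur_cpu_id to new_cpu_id. Example (illustrative): cur_cpu_id = 0 and
 * new_cpu_id = 1 gives ror_val = 31, and rotating 0x01010101 right by
 * 31 yields 0x02020202, i.e. every matching byte now targets
 * interface 1.
 */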

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

static void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	struct gic_chip_data *gic = d->host_data;
	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
	const struct irq_chip *chip;

	chip = (static_branch_likely(&supports_deactivate_key) &&
		gic == &gic_data[0]) ? &gic_chip_mode1 : &gic_chip;

	switch (hw) {
	case 0 ... 31:
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;
	default:
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;
	}

	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}

static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}

static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
		*hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/* Make it clear that broken DTs are... broken */
		WARN(*type == IRQ_TYPE_NONE,
		     "HW irq %ld has invalid type\n", *hwirq);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		if (fwspec->param[0] < 16) {
			pr_err(FW_BUG "Illegal GSI%d translation request\n",
			       fwspec->param[0]);
			return -EINVAL;
		}

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN(*type == IRQ_TYPE_NONE,
		     "HW irq %ld has invalid type\n", *hwirq);
		return 0;
	}

	return -EINVAL;
}
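
/*
 * DT translation example (illustrative): the three-cell specifier
 * <0 29 4> decodes as SPI 29 -> hwirq 29 + 32 = 61 with type
 * IRQ_TYPE_LEVEL_HIGH (4), while <1 13 8> is PPI 13 -> hwirq 29 with
 * type IRQ_TYPE_LEVEL_LOW (8).
 */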

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
};

static int gic_init_bases(struct gic_chip_data *gic,
			  struct fwnode_handle *handle)
{
	int gic_irqs, ret;

	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		/* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			ret = -ENOMEM;
			goto error;
		}

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = gic->percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
				gic->raw_dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
				gic->raw_cpu_base + offset;
		}

		enable_frankengic();
	} else {
		/* Normal, sane GIC... */
		WARN(gic->percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     gic->percpu_offset);
		gic->dist_base.common_base = gic->raw_dist_base;
		gic->cpu_base.common_base = gic->raw_cpu_base;
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
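	/*
	 * Example (illustrative): a GICD_TYPER.ITLinesNumber value of 5
	 * reads back as (5 + 1) * 32 = 192 implemented interrupt lines.
	 */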
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	if (handle) {		/* DT/ACPI */
		gic->domain = irq_domain_create_linear(handle, gic_irqs,
						       &gic_irq_domain_hierarchy_ops,
						       gic);
	} else {		/* Legacy support */
		/*
		 * For primary GICs, skip over SGIs.
		 * No secondary GIC support whatsoever.
		 */
		int irq_base;

		gic_irqs -= 16; /* calculate # of irqs to allocate */

		irq_base = irq_alloc_descs(16, 16, gic_irqs,
					   numa_node_id());
		if (irq_base < 0) {
			WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n");
			irq_base = 16;
		}

		gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
						    16, &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain)) {
		ret = -ENODEV;
		goto error;
	}

	gic_dist_init(gic);
	ret = gic_cpu_init(gic);
	if (ret)
		goto error;

	ret = gic_pm_init(gic);
	if (ret)
		goto error;

	return 0;

error:
	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		free_percpu(gic->dist_base.percpu_base);
		free_percpu(gic->cpu_base.percpu_base);
	}

	return ret;
}

static int __init __gic_init_bases(struct gic_chip_data *gic,
				   struct fwnode_handle *handle)
{
	int i, ret;

	if (WARN_ON(!gic || gic->domain))
		return -EINVAL;

	if (gic == &gic_data[0]) {
		/*
		 * Initialize the CPU interface map to all CPUs.
		 * It will be refined as each CPU probes its ID.
		 * This is only necessary for the primary GIC.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			gic_cpu_map[i] = 0xff;

		set_handle_irq(gic_handle_irq);
		if (static_branch_likely(&supports_deactivate_key))
			pr_info("GIC: Using split EOI/Deactivate mode\n");
	}

	ret = gic_init_bases(gic, handle);
	if (gic == &gic_data[0])
		gic_smp_init();

	return ret;
}

void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
{
	struct gic_chip_data *gic;

	/*
	 * Non-DT/ACPI systems won't run a hypervisor, so let's not
	 * bother with these...
	 */
	static_branch_disable(&supports_deactivate_key);

	gic = &gic_data[0];
	gic->raw_dist_base = dist_base;
	gic->raw_cpu_base = cpu_base;

	__gic_init_bases(gic, NULL);
}

static void gic_teardown(struct gic_chip_data *gic)
{
	if (WARN_ON(!gic))
		return;

	if (gic->raw_dist_base)
		iounmap(gic->raw_dist_base);
	if (gic->raw_cpu_base)
		iounmap(gic->raw_cpu_base);
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;
static bool gicv2_force_probe;

static int __init gicv2_force_probe_cfg(char *buf)
{
	return strtobool(buf, &gicv2_force_probe);
}
early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);

static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
	struct resource cpuif_res;

	of_address_to_resource(node, 1, &cpuif_res);

	if (!is_hyp_mode_available())
		return false;
	if (resource_size(&cpuif_res) < SZ_8K) {
		void __iomem *alt;
		/*
		 * Check for a stupid firmware that only exposes the
		 * first page of a GICv2.
		 */
		if (!gic_check_gicv2(*base))
			return false;

		if (!gicv2_force_probe) {
			pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
			return false;
		}

		alt = ioremap(cpuif_res.start, SZ_8K);
		if (!alt)
			return false;
		if (!gic_check_gicv2(alt + SZ_4K)) {
			/*
			 * The first page was that of a GICv2, and
			 * the second was *something*. Let's trust it
			 * to be a GICv2, and update the mapping.
			 */
			pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
				&cpuif_res.start);
			iounmap(*base);
			*base = alt;
			return true;
		}

		/*
		 * We detected *two* initial GICv2 pages in a
		 * row. Could be a GICv2 aliased over two 64kB
		 * pages. Update the resource, map the iospace, and
		 * pray.
		 */
		iounmap(alt);
		alt = ioremap(cpuif_res.start, SZ_128K);
		if (!alt)
			return false;
		pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
			&cpuif_res.start);
		cpuif_res.end = cpuif_res.start + SZ_128K - 1;
		iounmap(*base);
		*base = alt;
	}
	if (resource_size(&cpuif_res) == SZ_128K) {
		/*
		 * Verify that we have the first 4kB of a GICv2
		 * aliased over the first 64kB by checking the
		 * GICC_IIDR register on both ends.
		 */
		if (!gic_check_gicv2(*base) ||
		    !gic_check_gicv2(*base + 0xf000))
			return false;

		/*
		 * Move the base up by 60kB, so that we have an 8kB
		 * contiguous region, which allows us to use GICC_DIR
		 * at its normal offset. Please pass me that bucket.
		 */
		*base += 0xf000;
		cpuif_res.start += 0xf000;
		pr_warn("GIC: Adjusting CPU interface base to %pa\n",
			&cpuif_res.start);
	}

	return true;
}

static bool gic_enable_rmw_access(void *data)
{
	/*
	 * The EMEV2 class of machines has a broken interconnect, and
	 * locks up on accesses that are less than 32bit. So far, only
	 * the affinity setting requires it.
	 */
	if (of_machine_is_compatible("renesas,emev2")) {
		static_branch_enable(&needs_rmw_access);
		return true;
	}

	return false;
}

static const struct gic_quirk gic_quirks[] = {
	{
		.desc		= "broken byte access",
		.compatible	= "arm,pl390",
		.init		= gic_enable_rmw_access,
	},
	{ },
};

static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
	if (!gic || !node)
		return -EINVAL;

	gic->raw_dist_base = of_iomap(node, 0);
	if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
		goto error;

	gic->raw_cpu_base = of_iomap(node, 1);
	if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
		goto error;

	if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
		gic->percpu_offset = 0;

	gic_enable_of_quirks(node, gic_quirks, gic);

	return 0;

error:
	gic_teardown(gic);

	return -ENOMEM;
}

int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
	int ret;

	if (!dev || !dev->of_node || !gic || !irq)
		return -EINVAL;

	*gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
	if (!*gic)
		return -ENOMEM;

	ret = gic_of_setup(*gic, dev->of_node);
	if (ret)
		return ret;

	ret = gic_init_bases(*gic, &dev->of_node->fwnode);
	if (ret) {
		gic_teardown(*gic);
		return ret;
	}

	irq_domain_set_pm_device((*gic)->domain, dev);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);

	return 0;
}

static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v2_kvm_info.maint_irq)
		return;

	ret = of_address_to_resource(node, 2, vctrl_res);
	if (ret)
		return;

	ret = of_address_to_resource(node, 3, vcpu_res);
	if (ret)
		return;

	if (static_branch_likely(&supports_deactivate_key))
		vgic_set_kvm_info(&gic_v2_kvm_info);
}

int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	struct gic_chip_data *gic;
	int irq, ret;

	if (WARN_ON(!node))
		return -ENODEV;

	if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
		return -EINVAL;

	gic = &gic_data[gic_cnt];

	ret = gic_of_setup(gic, node);
	if (ret)
		return ret;

	/*
	 * Disable split EOI/Deactivate if either HYP is not available
	 * or the CPU interface is too small.
	 */
	if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
		static_branch_disable(&supports_deactivate_key);

	ret = __gic_init_bases(gic, &node->fwnode);
	if (ret) {
		gic_teardown(gic);
		return ret;
	}

	if (!gic_cnt) {
		gic_init_physaddr(node);
		gic_of_setup_kvm_info(node);
	}

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
#else
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
	return -ENOTSUPP;
}
#endif

#ifdef CONFIG_ACPI
static struct
{
	phys_addr_t cpu_phys_base;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vctrl_base;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

static int __init
gic_acpi_parse_madt_cpu(union acpi_subtable_headers *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 register in ACPI spec.
	 * All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
		return -EINVAL;

	acpi_data.cpu_phys_base = gic_cpu_base;
	acpi_data.maint_irq = processor->vgic_interrupt;
	acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
				    ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
	acpi_data.vctrl_base = processor->gich_base_address;
	acpi_data.vcpu_base = processor->gicv_base_address;

	cpu_base_assigned = 1;
	return 0;
}

/* The things you have to do to just *count* something... */
static int __init acpi_dummy_func(union acpi_subtable_headers *header,
				  const unsigned long end)
{
	return 0;
}

static bool __init acpi_gic_redist_is_present(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				     acpi_dummy_func, 0) > 0;
}

static bool __init gic_validate_dist(struct acpi_subtable_header *header,
				     struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	dist = (struct acpi_madt_generic_distributor *)header;

	return (dist->version == ape->driver_data &&
		(dist->version != ACPI_MADT_GIC_VERSION_NONE ||
		 !acpi_gic_redist_is_present()));
}

#define ACPI_GICV2_DIST_MEM_SIZE	(SZ_4K)
#define ACPI_GIC_CPU_IF_MEM_SIZE	(SZ_8K)
#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	if (!acpi_data.vctrl_base)
		return;

	vctrl_res->flags = IORESOURCE_MEM;
	vctrl_res->start = acpi_data.vctrl_base;
	vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1;

	if (!acpi_data.vcpu_base)
		return;

	vcpu_res->flags = IORESOURCE_MEM;
	vcpu_res->start = acpi_data.vcpu_base;
	vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v2_kvm_info.maint_irq = irq;

	vgic_set_kvm_info(&gic_v2_kvm_info);
}

static int __init gic_v2_acpi_init(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	struct gic_chip_data *gic = &gic_data[0];
	int count, ret;

	/* Collect CPU base addresses */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_madt_cpu, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!gic->raw_cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist = (struct acpi_madt_generic_distributor *)header;
	gic->raw_dist_base = ioremap(dist->base_address,
				     ACPI_GICV2_DIST_MEM_SIZE);
	if (!gic->raw_dist_base) {
		pr_err("Unable to map GICD registers\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	/*
	 * Disable split EOI/Deactivate if HYP is not available. ACPI
	 * guarantees that we'll always have a GICv2, so the CPU
	 * interface will always be the right size.
	 */
	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	/*
	 * Initialize GIC instance zero (no multi-GIC support).
	 */
	domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	ret = __gic_init_bases(gic, domain_handle);
	if (ret) {
		pr_err("Failed to initialise GIC\n");
		irq_domain_free_fwnode(domain_handle);
		gic_teardown(gic);
		return ret;
	}

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(NULL, gic_data[0].domain);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;
}
IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
		     gic_v2_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
		     gic_v2_acpi_init);
#endif