cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

irq-gic-v3.c (62718B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt)	"GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#define GICD_INT_NMI_PRI	(GICD_INT_DEF_PRI & ~0x80)

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)

#define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)

struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
	bool			single_redist;
};

struct gic_chip_data {
	struct fwnode_handle	*fwnode;
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	u64			flags;
	bool			has_rss;
	unsigned int		ppi_nr;
	struct partition_desc	**ppi_descs;
};

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

#define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * see GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
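
/*
 * Worked example (follows from the rule above): a priority of 0xa0
 * programmed by non-secure software is presented to the GIC CPUIF as
 * (0xa0 >> 1) | 0x80 = 0xd0, i.e. squeezed into the non-secure half
 * (0x80-0xff) of the priority range.
 */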
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

/*
 * Global static key controlling whether an update to PMR allowing more
 * interrupts needs to be propagated to the redistributor (DSB SY).
 * And this needs to be exported for modules to be able to enable
 * interrupts...
 */
DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
EXPORT_SYMBOL(gic_pmr_sync);

DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);

/*
 * When the Non-secure world has access to group 0 interrupts (as a
 * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
 * return the Distributor's view of the interrupt priority.
 *
 * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
 * written by software is moved to the Non-secure range by the Distributor.
 *
 * If both are true (which is when gic_nonsecure_priorities gets enabled),
 * we need to shift down the priority programmed by software to match it
 * against the value returned by ICC_RPR_EL1.
 */
#define GICD_INT_RPR_PRI(priority)					\
	({								\
		u32 __priority = (priority);				\
		if (static_branch_unlikely(&gic_nonsecure_priorities))	\
			__priority = 0x80 | (__priority >> 1);		\
									\
		__priority;						\
	})
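
/*
 * Worked example (follows from the macro above): with the kernel's
 * default GICD_INT_DEF_PRI of 0xa0, GICD_INT_NMI_PRI is 0x20, and with
 * gic_nonsecure_priorities enabled GICD_INT_RPR_PRI(GICD_INT_NMI_PRI)
 * evaluates to 0x80 | (0x20 >> 1) = 0x90, the value ICC_RPR_EL1 is
 * expected to return while a pseudo-NMI is in progress.
 */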

/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

enum gic_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	EPPI_RANGE,
	ESPI_RANGE,
	LPI_RANGE,
	__INVALID_RANGE__
};

static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
	switch (hwirq) {
	case 0 ... 15:
		return SGI_RANGE;
	case 16 ... 31:
		return PPI_RANGE;
	case 32 ... 1019:
		return SPI_RANGE;
	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
		return EPPI_RANGE;
	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
		return ESPI_RANGE;
	case 8192 ... GENMASK(23, 0):
		return LPI_RANGE;
	default:
		return __INVALID_RANGE__;
	}
}

static enum gic_intid_range get_intid_range(struct irq_data *d)
{
	return __get_intid_range(d->hwirq);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		return true;
	default:
		return false;
	}
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	case SPI_RANGE:
	case ESPI_RANGE:
		/* SPI -> dist_base */
		return gic_data.dist_base;

	default:
		return NULL;
	}
}

static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & bit) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}

#ifdef CONFIG_ARM64

static u64 __maybe_unused gic_read_iar(void)
{
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif

static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
		return;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case SPI_RANGE:
		*index = d->hwirq;
		return offset;
	case EPPI_RANGE:
		/*
		 * Contrary to the ESPI range, the EPPI range is contiguous
		 * with the PPI range in the registers, so let's adjust the
		 * displacement accordingly. Consistency is overrated.
		 */
		*index = d->hwirq - EPPI_BASE_INTID + 32;
		return offset;
	case ESPI_RANGE:
		*index = d->hwirq - ESPI_BASE_INTID;
		switch (offset) {
		case GICD_ISENABLER:
			return GICD_ISENABLERnE;
		case GICD_ICENABLER:
			return GICD_ICENABLERnE;
		case GICD_ISPENDR:
			return GICD_ISPENDRnE;
		case GICD_ICPENDR:
			return GICD_ICPENDRnE;
		case GICD_ISACTIVER:
			return GICD_ISACTIVERnE;
		case GICD_ICACTIVER:
			return GICD_ICACTIVERnE;
		case GICD_IPRIORITYR:
			return GICD_IPRIORITYRnE;
		case GICD_ICFGR:
			return GICD_ICFGRnE;
		case GICD_IROUTER:
			return GICD_IROUTERnE;
		default:
			break;
		}
		break;
	default:
		break;
	}

	WARN_ON(1);
	*index = d->hwirq;
	return offset;
}
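
/*
 * Worked example (follows from the switch above): for an ESPI at
 * hwirq = ESPI_BASE_INTID + 5, convert_offset_index(d, GICD_ISENABLER,
 * &index) sets index to 5 and returns GICD_ISENABLERnE, whereas a
 * regular SPI at hwirq 42 keeps index = 42 and the offset unchanged.
 */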

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	writel_relaxed(mask, base + offset + (index / 32) * 4);
}
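
/*
 * Worked example: peeking or poking SPI 42 computes
 * mask = 1 << (42 % 32) = 1 << 10 and accesses offset + (42 / 32) * 4,
 * i.e. bit 10 of the second 32-bit register of the bank.
 */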

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
	if (gic_irq_in_rdist(d))
		gic_redist_wait_for_rwp();
	else
		gic_dist_wait_for_rwp();
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}

static inline bool gic_supports_nmi(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       static_branch_likely(&supports_pseudo_nmis);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		if (val) {
			gic_mask_irq(d);
			return 0;
		}
		reg = GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
	void __iomem *base = gic_dist_base(d);
	u32 offset, index;

	offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

	writeb_relaxed(prio, base + offset + index);
}

static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
{
	switch (__get_intid_range(hwirq)) {
	case PPI_RANGE:
		return hwirq - 16;
	case EPPI_RANGE:
		return hwirq - EPPI_BASE_INTID + 16;
	default:
		unreachable();
	}
}
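
/*
 * Worked example: PPI hwirq 27 maps to index 11, while the first EPPI
 * (hwirq == EPPI_BASE_INTID) maps to index 16, right after the 16
 * regular PPIs.
 */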

static u32 gic_get_ppi_index(struct irq_data *d)
{
	return __gic_get_ppi_index(d->hwirq);
}

static int gic_irq_nmi_setup(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (!gic_supports_nmi())
		return -EINVAL;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return -EINVAL;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI requests;
	 * it should not be possible to get here.
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return -EINVAL;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Setting up PPI as NMI, only switch handler for first NMI */
		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
			refcount_set(&ppi_nmi_refs[idx], 1);
			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
		}
	} else {
		desc->handle_irq = handle_fasteoi_nmi;
	}

	gic_irq_set_prio(d, GICD_INT_NMI_PRI);

	return 0;
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (WARN_ON(!gic_supports_nmi()))
		return;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI requests;
	 * it should not be possible to get here.
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Tearing down NMI, only switch handler for last NMI */
		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
			desc->handle_irq = handle_percpu_devid_irq;
	} else {
		desc->handle_irq = handle_fasteoi_irq;
	}

	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}

static void gic_eoi_irq(struct irq_data *d)
{
	write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
	isb();
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	enum gic_intid_range range;
	unsigned int irq = gic_irq(d);
	void __iomem *base;
	u32 offset, index;
	int ret;

	range = get_intid_range(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (range == SGI_RANGE)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	offset = convert_offset_index(d, GICD_ICFGR, &index);

	ret = gic_configure_irq(index, type, base + offset, NULL);
	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
		ret = 0;
	}

	return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (get_intid_range(d) == SGI_RANGE)
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
	u64 aff;

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}
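
/*
 * Worked example: an MPIDR with Aff3 = 0, Aff2 = 1, Aff1 = 2 and
 * Aff0 = 3 yields the GICD_IROUTER value 0x0000000000010203, the 8-bit
 * affinity fields landing at bit positions 32, 16, 8 and 0.
 */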

static void gic_deactivate_unhandled(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key)) {
		if (irqnr < 8192)
			gic_write_dir(irqnr);
	} else {
		write_gicreg(irqnr, ICC_EOIR1_EL1);
		isb();
	}
}

/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Deactivate the interrupt when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key))
		write_gicreg(irqnr, ICC_EOIR1_EL1);

	isb();
}

static bool gic_rpr_is_nmi_prio(void)
{
	if (!gic_supports_nmi())
		return false;

	return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
}

static bool gic_irqnr_is_special(u32 irqnr)
{
	return irqnr >= 1020 && irqnr <= 1023;
}

static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}

static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}

/*
 * An exception has been taken from a context with IRQs enabled, and this could
 * be an IRQ or an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
 * after handling any NMI but before handling any IRQ.
 *
 * The entry code has performed IRQ entry, and if an NMI is detected we must
 * perform NMI entry/exit around invoking the handler.
 */
static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
	bool is_nmi;
	u32 irqnr;

	irqnr = gic_read_iar();

	is_nmi = gic_rpr_is_nmi_prio();

	if (is_nmi) {
		nmi_enter();
		__gic_handle_nmi(irqnr, regs);
		nmi_exit();
	}

	if (gic_prio_masking_enabled()) {
		gic_pmr_mask_irqs();
		gic_arch_enable_irqs();
	}

	if (!is_nmi)
		__gic_handle_irq(irqnr, regs);
}

/*
 * An exception has been taken from a context with IRQs disabled, which can only
 * be an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
 * DAIF.IF (and ICC_PMR_EL1) unchanged.
 *
 * The entry code has performed NMI entry.
 */
static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
{
	u64 pmr;
	u32 irqnr;

	/*
	 * We were in a context with IRQs disabled. However, the
	 * entry code has set PMR to a value that allows any
	 * interrupt to be acknowledged, and not just NMIs. This can
	 * lead to surprising effects if the NMI has been retired in
	 * the meantime, and an IRQ is pending. The IRQ would then be
	 * taken in NMI context, something that nobody wants to debug
	 * twice.
	 *
	 * Until we sort this, drop PMR again to a level that will
	 * actually only allow NMIs before reading IAR, and then
	 * restore it to what it was.
	 */
	pmr = gic_read_pmr();
	gic_pmr_mask_irqs();
	isb();
	irqnr = gic_read_iar();
	gic_write_pmr(pmr);

	__gic_handle_nmi(irqnr, regs);
}

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
		__gic_handle_irq_from_irqsoff(regs);
	else
		__gic_handle_irq_from_irqson(regs);
}

static u32 gic_get_pribits(void)
{
	u32 pribits;

	pribits = gic_read_ctlr();
	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
	pribits++;

	return pribits;
}
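
/*
 * Worked example: an ICC_CTLR_EL1.PRIbits field of 4 means 5 bits of
 * priority are implemented, i.e. 32 distinct priority levels.
 */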

static bool gic_has_group0(void)
{
	u32 val;
	u32 old_pmr;

	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}
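
/*
 * Worked example: with 5 priority bits implemented, the probe above
 * writes BIT(8 - 5) = 0x08 to PMR, the lowest non-zero priority the
 * GIC can hold. If EL3 owns Group0, the shift turns it into 0x04,
 * which is below the implemented bits, so only bit 7 survives and the
 * non-secure read returns 0.
 */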

static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;
	u32 val;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < GIC_LINE_NR; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
	for (i = 0; i < GIC_ESPI_NR; i += 32) {
		writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
		writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
	}

	for (i = 0; i < GIC_ESPI_NR; i += 32)
		writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

	for (i = 0; i < GIC_ESPI_NR; i += 16)
		writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

	for (i = 0; i < GIC_ESPI_NR; i += 4)
		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);

	/* Now do the common stuff */
	gic_dist_config(base, GIC_LINE_NR, NULL);

	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
		pr_info("Enabling SGIs without active state\n");
		val |= GICD_CTLR_nASSGIreq;
	}

	/* Enable distributor with ARE, Group1, and wait for it to drain */
	writel_relaxed(val, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < GIC_LINE_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);

	for (i = 0; i < GIC_ESPI_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u64 typer;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			ret = fn(gic_data.redist_regions + i, ptr);
			if (!ret)
				return 0;

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	return ret ? -ENODEV : 0;
}

static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;
		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;

		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
			smp_processor_id(), mpidr,
			(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
	}

	/* Try next one */
	return 1;
}

static int gic_populate_rdist(void)
{
	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
		return 0;

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(),
	     (unsigned long)cpu_logical_map(smp_processor_id()));
	return -ENODEV;
}

static int __gic_update_rdist_properties(struct redist_region *region,
					 void __iomem *ptr)
{
	u64 typer = gic_read_typer(ptr + GICR_TYPER);
	u32 ctlr = readl_relaxed(ptr + GICR_CTLR);

	/* Boot-time cleanup */
	if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
		u64 val;

		/* Deactivate any present vPE */
		val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
		if (val & GICR_VPENDBASER_Valid)
			gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
					      ptr + SZ_128K + GICR_VPENDBASER);

		/* Mark the VPE table as invalid */
		val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
		val &= ~GICR_VPROPBASER_4_1_VALID;
		gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
	}

	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

	/*
	 * TYPER.RVPEID implies some form of DirectLPI, no matter what the
	 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
	 * that the ITS driver can make use of for LPIs (and not VLPIs).
	 *
	 * These are 3 different ways to express the same thing, depending
	 * on the revision of the architecture and its relaxations over
	 * time. Just group them under the 'direct_lpi' banner.
	 */
	gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
	gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
					   !!(ctlr & GICR_CTLR_IR) |
					   gic_data.rdists.has_rvpeid);
	gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

	/* Detect non-sensical configurations */
	if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
		gic_data.rdists.has_direct_lpi = false;
		gic_data.rdists.has_vlpis = false;
		gic_data.rdists.has_rvpeid = false;
	}

	gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

	return 1;
}

static void gic_update_rdist_properties(void)
{
	gic_data.ppi_nr = UINT_MAX;
	gic_iterate_rdists(__gic_update_rdist_properties);
	if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
		gic_data.ppi_nr = 0;
	pr_info("GICv3 features: %d PPIs%s%s\n",
		gic_data.ppi_nr,
		gic_data.has_rss ? ", RSS" : "",
		gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");

	if (gic_data.rdists.has_vlpis)
		pr_info("GICv4 features: %s%s%s\n",
			gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
			gic_data.rdists.has_rvpeid ? "RVPEID " : "",
			gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}

/* Check whether the GIC presents a single security state view */
static inline bool gic_dist_security_disabled(void)
{
	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static void gic_cpu_sys_reg_init(void)
{
	int i, cpu = smp_processor_id();
	u64 mpidr = cpu_logical_map(cpu);
	u64 need_rss = MPIDR_RS(mpidr);
	bool group0;
	u32 pribits;

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	pribits = gic_get_pribits();

	group0 = gic_has_group0();

	/* Set priority mask register */
	if (!gic_prio_masking_enabled()) {
		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
	} else if (gic_supports_nmi()) {
		/*
		 * Mismatched configuration with the boot CPU: the system
		 * is likely to die as interrupt masking will not work
		 * properly on all CPUs.
		 *
		 * The boot CPU calls this function before enabling NMI support,
		 * and as a result we'll never see this warning in the boot path
		 * for that CPU.
		 */
		if (static_branch_unlikely(&gic_nonsecure_priorities))
			WARN_ON(!group0 || gic_dist_security_disabled());
		else
			WARN_ON(group0 && !gic_dist_security_disabled());
	}

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* Always whack Group0 before Group1 */
	if (group0) {
		switch (pribits) {
		case 8:
		case 7:
			write_gicreg(0, ICC_AP0R3_EL1);
			write_gicreg(0, ICC_AP0R2_EL1);
			fallthrough;
		case 6:
			write_gicreg(0, ICC_AP0R1_EL1);
			fallthrough;
		case 5:
		case 4:
			write_gicreg(0, ICC_AP0R0_EL1);
		}

		isb();
	}

	switch (pribits) {
	case 8:
	case 7:
		write_gicreg(0, ICC_AP1R3_EL1);
		write_gicreg(0, ICC_AP1R2_EL1);
		fallthrough;
	case 6:
		write_gicreg(0, ICC_AP1R1_EL1);
		fallthrough;
	case 5:
	case 4:
		write_gicreg(0, ICC_AP1R0_EL1);
	}

	isb();

	/* ... and let's hit the road... */
	gic_write_grpen1(1);

	/* Keep the RSS capability status in per_cpu variable */
	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

	/* Check that all CPUs are capable of sending SGIs to other CPUs */
	for_each_online_cpu(i) {
		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

		need_rss |= MPIDR_RS(cpu_logical_map(i));
		if (need_rss && (!have_rss))
			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
				cpu, (unsigned long)mpidr,
				i, (unsigned long)cpu_logical_map(i));
	}

	/*
	 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
	 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
	 * UNPREDICTABLE choice of:
	 *   - The write is ignored.
	 *   - The RS field is treated as 0.
	 */
	if (need_rss && (!gic_data.has_rss))
		pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static bool gicv3_nolpi;

static int __init gicv3_nolpi_cfg(char *buf)
{
	return strtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
	return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
		!!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
		!gicv3_nolpi);
}

static void gic_cpu_init(void)
{
	void __iomem *rbase;
	int i;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
	     !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
	     "Distributor has extended ranges, but CPU%d doesn't\n",
	     smp_processor_id());

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
		writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);

	gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr)	(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)

static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init();

	if (gic_dist_supports_lpis())
		its_cpu_init();

	return 0;
}

static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
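
/*
 * Worked example: if CPUs 0-3 share a cluster and have Aff0 values
 * 0-3, a mask covering all four yields tlist = 0b1111, with *base_cpu
 * left on the last CPU of the cluster so the caller's loop resumes at
 * the next cluster.
 */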

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       MPIDR_TO_SGI_RS(cluster_id)		|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
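
/*
 * Worked example (using the ICC_SGI1R_* shifts from
 * <linux/irqchip/arm-gic-v3.h>): sending SGI 5 to target list 0b0011
 * of the cluster with Aff1 = 1 places INTID 5 at bit 24, Aff1 at
 * bit 16 and the target list in bits [15:0].
 */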

static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;

	if (WARN_ON(d->hwirq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb(ishst);

	for_each_cpu(cpu, mask) {
		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, d->hwirq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

static void __init gic_smp_init(void)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data.fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gicv3:starting",
				  gic_starting_cpu, NULL);

	/* Register all 8 non-secure SGIs */
	base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
					   NUMA_NO_NODE, &sgi_fwspec,
					   false, NULL);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	u32 offset, index;
	void __iomem *reg;
	int enabled;
	u64 val;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	offset = convert_offset_index(d, GICD_IROUTER, &index);
	reg = gic_dist_base(d) + offset + (index * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#define gic_smp_init()		do { } while(0)
#endif

static int gic_retrigger(struct irq_data *data)
{
	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}

#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		if (gic_dist_security_disabled())
			gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger		= gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger		= gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;
	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));

	if (static_branch_likely(&supports_deactivate_key))
		chip = &gic_eoimode1_chip;

	switch (__get_intid_range(hw)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;

	case SPI_RANGE:
	case ESPI_RANGE:
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;

	case LPI_RANGE:
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		break;

	default:
		return -EPERM;
	}

	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}

static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
		*hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case 2:			/* ESPI */
			*hwirq = fwspec->param[1] + ESPI_BASE_INTID;
			break;
		case 3:			/* EPPI */
			*hwirq = fwspec->param[1] + EPPI_BASE_INTID;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		case GIC_IRQ_TYPE_PARTITION:
			*hwirq = fwspec->param[1];
			if (fwspec->param[1] >= 16)
				*hwirq += EPPI_BASE_INTID - 16;
			else
				*hwirq += 16;
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/*
		 * Make it clear that broken DTs are... broken.
		 * Partitioned PPIs are an unfortunate exception.
		 */
		WARN_ON(*type == IRQ_TYPE_NONE &&
			fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		if (fwspec->param[0] < 16) {
			pr_err(FW_BUG "Illegal GSI%d translation request\n",
			       fwspec->param[0]);
			return -EINVAL;
		}

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}

	return -EINVAL;
}
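
/*
 * Worked example: the DT interrupt specifier <0 42 IRQ_TYPE_LEVEL_HIGH>
 * (an SPI) translates to hwirq 42 + 32 = 74, while <1 13 ...> (a PPI)
 * becomes hwirq 13 + 16 = 29.
 */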
   1553
   1554static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
   1555				unsigned int nr_irqs, void *arg)
   1556{
   1557	int i, ret;
   1558	irq_hw_number_t hwirq;
   1559	unsigned int type = IRQ_TYPE_NONE;
   1560	struct irq_fwspec *fwspec = arg;
   1561
   1562	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
   1563	if (ret)
   1564		return ret;
   1565
   1566	for (i = 0; i < nr_irqs; i++) {
   1567		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
   1568		if (ret)
   1569			return ret;
   1570	}
   1571
   1572	return 0;
   1573}
   1574
   1575static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
   1576				unsigned int nr_irqs)
   1577{
   1578	int i;
   1579
   1580	for (i = 0; i < nr_irqs; i++) {
   1581		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
   1582		irq_set_handler(virq + i, NULL);
   1583		irq_domain_reset_irq_data(d);
   1584	}
   1585}
   1586
   1587static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
   1588				      irq_hw_number_t hwirq)
   1589{
   1590	enum gic_intid_range range;
   1591
   1592	if (!gic_data.ppi_descs)
   1593		return false;
   1594
   1595	if (!is_of_node(fwspec->fwnode))
   1596		return false;
   1597
   1598	if (fwspec->param_count < 4 || !fwspec->param[3])
   1599		return false;
   1600
   1601	range = __get_intid_range(hwirq);
   1602	if (range != PPI_RANGE && range != EPPI_RANGE)
   1603		return false;
   1604
   1605	return true;
   1606}
   1607
   1608static int gic_irq_domain_select(struct irq_domain *d,
   1609				 struct irq_fwspec *fwspec,
   1610				 enum irq_domain_bus_token bus_token)
   1611{
   1612	unsigned int type, ret, ppi_idx;
   1613	irq_hw_number_t hwirq;
   1614
   1615	/* Not for us */
   1616        if (fwspec->fwnode != d->fwnode)
   1617		return 0;
   1618
   1619	/* If this is not DT, then we have a single domain */
   1620	if (!is_of_node(fwspec->fwnode))
   1621		return 1;
   1622
   1623	ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
   1624	if (WARN_ON_ONCE(ret))
   1625		return 0;
   1626
   1627	if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
   1628		return d == gic_data.domain;
   1629
   1630	/*
   1631	 * If this is a PPI and we have a 4th (non-null) parameter,
   1632	 * then we need to match the partition domain.
   1633	 */
   1634	ppi_idx = __gic_get_ppi_index(hwirq);
   1635	return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
   1636}
   1637
   1638static const struct irq_domain_ops gic_irq_domain_ops = {
   1639	.translate = gic_irq_domain_translate,
   1640	.alloc = gic_irq_domain_alloc,
   1641	.free = gic_irq_domain_free,
   1642	.select = gic_irq_domain_select,
   1643};
   1644
   1645static int partition_domain_translate(struct irq_domain *d,
   1646				      struct irq_fwspec *fwspec,
   1647				      unsigned long *hwirq,
   1648				      unsigned int *type)
   1649{
   1650	unsigned long ppi_intid;
   1651	struct device_node *np;
   1652	unsigned int ppi_idx;
   1653	int ret;
   1654
   1655	if (!gic_data.ppi_descs)
   1656		return -ENOMEM;
   1657
   1658	np = of_find_node_by_phandle(fwspec->param[3]);
   1659	if (WARN_ON(!np))
   1660		return -EINVAL;
   1661
   1662	ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
   1663	if (WARN_ON_ONCE(ret))
   1664		return 0;
   1665
   1666	ppi_idx = __gic_get_ppi_index(ppi_intid);
   1667	ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
   1668				     of_node_to_fwnode(np));
   1669	if (ret < 0)
   1670		return ret;
   1671
   1672	*hwirq = ret;
   1673	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
   1674
   1675	return 0;
   1676}
   1677
   1678static const struct irq_domain_ops partition_domain_ops = {
   1679	.translate = partition_domain_translate,
   1680	.select = gic_irq_domain_select,
   1681};
   1682
   1683static bool gic_enable_quirk_msm8996(void *data)
   1684{
   1685	struct gic_chip_data *d = data;
   1686
   1687	d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
   1688
   1689	return true;
   1690}
   1691
   1692static bool gic_enable_quirk_cavium_38539(void *data)
   1693{
   1694	struct gic_chip_data *d = data;
   1695
   1696	d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
   1697
   1698	return true;
   1699}
   1700
   1701static bool gic_enable_quirk_hip06_07(void *data)
   1702{
   1703	struct gic_chip_data *d = data;
   1704
   1705	/*
   1706	 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
   1707	 * not being an actual ARM implementation). The saving grace is
   1708	 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
   1709	 * HIP07 doesn't even have a proper IIDR, and still pretends to
   1710	 * have ESPI. In both cases, put them right.
   1711	 */
   1712	if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
   1713		/* Zero both ESPI and the RES0 field next to it... */
   1714		d->rdists.gicd_typer &= ~GENMASK(9, 8);
   1715		return true;
   1716	}
   1717
   1718	return false;
   1719}
   1720
   1721static const struct gic_quirk gic_quirks[] = {
   1722	{
   1723		.desc	= "GICv3: Qualcomm MSM8996 broken firmware",
   1724		.compatible = "qcom,msm8996-gic-v3",
   1725		.init	= gic_enable_quirk_msm8996,
   1726	},
   1727	{
   1728		.desc	= "GICv3: HIP06 erratum 161010803",
   1729		.iidr	= 0x0204043b,
   1730		.mask	= 0xffffffff,
   1731		.init	= gic_enable_quirk_hip06_07,
   1732	},
   1733	{
   1734		.desc	= "GICv3: HIP07 erratum 161010803",
   1735		.iidr	= 0x00000000,
   1736		.mask	= 0xffffffff,
   1737		.init	= gic_enable_quirk_hip06_07,
   1738	},
   1739	{
   1740		/*
   1741		 * Reserved register accesses generate a Synchronous
   1742		 * External Abort. This erratum applies to:
   1743		 * - ThunderX: CN88xx
   1744		 * - OCTEON TX: CN83xx, CN81xx
   1745		 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
   1746		 */
   1747		.desc	= "GICv3: Cavium erratum 38539",
   1748		.iidr	= 0xa000034c,
   1749		.mask	= 0xe8f00fff,
   1750		.init	= gic_enable_quirk_cavium_38539,
   1751	},
   1752	{
   1753	}
   1754};
   1755
   1756static void gic_enable_nmi_support(void)
   1757{
   1758	int i;
   1759
   1760	if (!gic_prio_masking_enabled())
   1761		return;
   1762
   1763	ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
   1764	if (!ppi_nmi_refs)
   1765		return;
   1766
   1767	for (i = 0; i < gic_data.ppi_nr; i++)
   1768		refcount_set(&ppi_nmi_refs[i], 0);
   1769
   1770	/*
   1771	 * Linux itself doesn't use 1:N distribution, so has no need to
   1772	 * set PMHE. The only reason to have it set is if EL3 requires it
   1773	 * (and we can't change it).
   1774	 */
   1775	if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
   1776		static_branch_enable(&gic_pmr_sync);
   1777
   1778	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
   1779		static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");
   1780
   1781	/*
   1782	 * How priority values are used by the GIC depends on two things:
   1783	 * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
   1784	 * and if Group 0 interrupts can be delivered to Linux in the non-secure
    1785	 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect
   1786	 * the ICC_PMR_EL1 register and the priority that software assigns to
   1787	 * interrupts:
   1788	 *
   1789	 * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
   1790	 * -----------------------------------------------------------
   1791	 *      1       |      -      |  unchanged  |    unchanged
   1792	 * -----------------------------------------------------------
   1793	 *      0       |      1      |  non-secure |    non-secure
   1794	 * -----------------------------------------------------------
   1795	 *      0       |      0      |  unchanged  |    non-secure
   1796	 *
   1797	 * where non-secure means that the value is right-shifted by one and the
    1798	 * MSB set, to make it fit in the non-secure priority range.
   1799	 *
   1800	 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
   1801	 * are both either modified or unchanged, we can use the same set of
   1802	 * priorities.
   1803	 *
   1804	 * In the last case, where only the interrupt priorities are modified to
   1805	 * be in the non-secure range, we use a different PMR value to mask IRQs
   1806	 * and the rest of the values that we use remain unchanged.
   1807	 */
   1808	if (gic_has_group0() && !gic_dist_security_disabled())
   1809		static_branch_enable(&gic_nonsecure_priorities);
   1810
   1811	static_branch_enable(&supports_pseudo_nmis);
   1812
   1813	if (static_branch_likely(&supports_deactivate_key))
   1814		gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
   1815	else
   1816		gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
   1817}
   1818
   1819static int __init gic_init_bases(void __iomem *dist_base,
   1820				 struct redist_region *rdist_regs,
   1821				 u32 nr_redist_regions,
   1822				 u64 redist_stride,
   1823				 struct fwnode_handle *handle)
   1824{
   1825	u32 typer;
   1826	int err;
   1827
   1828	if (!is_hyp_mode_available())
   1829		static_branch_disable(&supports_deactivate_key);
   1830
   1831	if (static_branch_likely(&supports_deactivate_key))
   1832		pr_info("GIC: Using split EOI/Deactivate mode\n");
   1833
   1834	gic_data.fwnode = handle;
   1835	gic_data.dist_base = dist_base;
   1836	gic_data.redist_regions = rdist_regs;
   1837	gic_data.nr_redist_regions = nr_redist_regions;
   1838	gic_data.redist_stride = redist_stride;
   1839
   1840	/*
   1841	 * Find out how many interrupts are supported.
   1842	 */
   1843	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
   1844	gic_data.rdists.gicd_typer = typer;
   1845
   1846	gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
   1847			  gic_quirks, &gic_data);
   1848
   1849	pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
   1850	pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
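
	/*
	 * Illustration: GIC_LINE_NR derives from GICD_TYPER.ITLinesNumber
	 * (bits [4:0]) as 32 * (N + 1), capped at 1020. E.g. N = 7 means
	 * 256 lines, so the message above would read "224 SPIs implemented"
	 * once the first 32 SGIs/PPIs are subtracted.
	 */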
   1851
   1852	/*
   1853	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
   1854	 * architecture spec (which says that reserved registers are RES0).
   1855	 */
   1856	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
   1857		gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
   1858
   1859	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
   1860						 &gic_data);
   1861	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
   1862	gic_data.rdists.has_rvpeid = true;
   1863	gic_data.rdists.has_vlpis = true;
   1864	gic_data.rdists.has_direct_lpi = true;
   1865	gic_data.rdists.has_vpend_valid_dirty = true;
   1866
   1867	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
   1868		err = -ENOMEM;
   1869		goto out_free;
   1870	}
   1871
   1872	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
   1873
   1874	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
   1875
   1876	if (typer & GICD_TYPER_MBIS) {
   1877		err = mbi_init(handle, gic_data.domain);
   1878		if (err)
   1879			pr_err("Failed to initialize MBIs\n");
   1880	}
   1881
   1882	set_handle_irq(gic_handle_irq);
   1883
   1884	gic_update_rdist_properties();
   1885
   1886	gic_dist_init();
   1887	gic_cpu_init();
   1888	gic_smp_init();
   1889	gic_cpu_pm_init();
   1890
   1891	if (gic_dist_supports_lpis()) {
   1892		its_init(handle, &gic_data.rdists, gic_data.domain);
   1893		its_cpu_init();
   1894		its_lpi_memreserve_init();
   1895	} else {
   1896		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
   1897			gicv2m_init(handle, gic_data.domain);
   1898	}
   1899
   1900	gic_enable_nmi_support();
   1901
   1902	return 0;
   1903
   1904out_free:
   1905	if (gic_data.domain)
   1906		irq_domain_remove(gic_data.domain);
   1907	free_percpu(gic_data.rdists.rdist);
   1908	return err;
   1909}
   1910
   1911static int __init gic_validate_dist_version(void __iomem *dist_base)
   1912{
   1913	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
   1914
   1915	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
   1916		return -ENODEV;
   1917
   1918	return 0;
   1919}
   1920
   1921/* Create all possible partitions at boot time */
   1922static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
   1923{
   1924	struct device_node *parts_node, *child_part;
   1925	int part_idx = 0, i;
   1926	int nr_parts;
   1927	struct partition_affinity *parts;
   1928
   1929	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
   1930	if (!parts_node)
   1931		return;
   1932
   1933	gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
   1934	if (!gic_data.ppi_descs)
   1935		goto out_put_node;
   1936
   1937	nr_parts = of_get_child_count(parts_node);
   1938
   1939	if (!nr_parts)
   1940		goto out_put_node;
   1941
   1942	parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
   1943	if (WARN_ON(!parts))
   1944		goto out_put_node;
   1945
   1946	for_each_child_of_node(parts_node, child_part) {
   1947		struct partition_affinity *part;
   1948		int n;
   1949
   1950		part = &parts[part_idx];
   1951
   1952		part->partition_id = of_node_to_fwnode(child_part);
   1953
   1954		pr_info("GIC: PPI partition %pOFn[%d] { ",
   1955			child_part, part_idx);
   1956
   1957		n = of_property_count_elems_of_size(child_part, "affinity",
   1958						    sizeof(u32));
   1959		WARN_ON(n <= 0);
   1960
   1961		for (i = 0; i < n; i++) {
   1962			int err, cpu;
   1963			u32 cpu_phandle;
   1964			struct device_node *cpu_node;
   1965
   1966			err = of_property_read_u32_index(child_part, "affinity",
   1967							 i, &cpu_phandle);
   1968			if (WARN_ON(err))
   1969				continue;
   1970
   1971			cpu_node = of_find_node_by_phandle(cpu_phandle);
   1972			if (WARN_ON(!cpu_node))
   1973				continue;
   1974
   1975			cpu = of_cpu_node_to_id(cpu_node);
   1976			if (WARN_ON(cpu < 0)) {
   1977				of_node_put(cpu_node);
   1978				continue;
   1979			}
   1980
   1981			pr_cont("%pOF[%d] ", cpu_node, cpu);
   1982
   1983			cpumask_set_cpu(cpu, &part->mask);
   1984			of_node_put(cpu_node);
   1985		}
   1986
   1987		pr_cont("}\n");
   1988		part_idx++;
   1989	}
   1990
   1991	for (i = 0; i < gic_data.ppi_nr; i++) {
   1992		unsigned int irq;
   1993		struct partition_desc *desc;
   1994		struct irq_fwspec ppi_fwspec = {
   1995			.fwnode		= gic_data.fwnode,
   1996			.param_count	= 3,
   1997			.param		= {
   1998				[0]	= GIC_IRQ_TYPE_PARTITION,
   1999				[1]	= i,
   2000				[2]	= IRQ_TYPE_NONE,
   2001			},
   2002		};
   2003
   2004		irq = irq_create_fwspec_mapping(&ppi_fwspec);
   2005		if (WARN_ON(!irq))
   2006			continue;
   2007		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
   2008					     irq, &partition_domain_ops);
   2009		if (WARN_ON(!desc))
   2010			continue;
   2011
   2012		gic_data.ppi_descs[i] = desc;
   2013	}
   2014
   2015out_put_node:
   2016	of_node_put(parts_node);
   2017}
   2018
   2019static void __init gic_of_setup_kvm_info(struct device_node *node)
   2020{
   2021	int ret;
   2022	struct resource r;
   2023	u32 gicv_idx;
   2024
   2025	gic_v3_kvm_info.type = GIC_V3;
   2026
   2027	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
   2028	if (!gic_v3_kvm_info.maint_irq)
   2029		return;
   2030
   2031	if (of_property_read_u32(node, "#redistributor-regions",
   2032				 &gicv_idx))
   2033		gicv_idx = 1;
   2034
   2035	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
   2036	ret = of_address_to_resource(node, gicv_idx, &r);
   2037	if (!ret)
   2038		gic_v3_kvm_info.vcpu = r;
   2039
   2040	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
   2041	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
   2042	vgic_set_kvm_info(&gic_v3_kvm_info);
   2043}
   2044
   2045static void gic_request_region(resource_size_t base, resource_size_t size,
   2046			       const char *name)
   2047{
   2048	if (!request_mem_region(base, size, name))
   2049		pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
   2050			     name, &base);
   2051}
   2052
   2053static void __iomem *gic_of_iomap(struct device_node *node, int idx,
   2054				  const char *name, struct resource *res)
   2055{
   2056	void __iomem *base;
   2057	int ret;
   2058
   2059	ret = of_address_to_resource(node, idx, res);
   2060	if (ret)
   2061		return IOMEM_ERR_PTR(ret);
   2062
   2063	gic_request_region(res->start, resource_size(res), name);
   2064	base = of_iomap(node, idx);
   2065
   2066	return base ?: IOMEM_ERR_PTR(-ENOMEM);
   2067}
   2068
   2069static int __init gic_of_init(struct device_node *node, struct device_node *parent)
   2070{
   2071	void __iomem *dist_base;
   2072	struct redist_region *rdist_regs;
   2073	struct resource res;
   2074	u64 redist_stride;
   2075	u32 nr_redist_regions;
   2076	int err, i;
   2077
   2078	dist_base = gic_of_iomap(node, 0, "GICD", &res);
   2079	if (IS_ERR(dist_base)) {
   2080		pr_err("%pOF: unable to map gic dist registers\n", node);
   2081		return PTR_ERR(dist_base);
   2082	}
   2083
   2084	err = gic_validate_dist_version(dist_base);
   2085	if (err) {
   2086		pr_err("%pOF: no distributor detected, giving up\n", node);
   2087		goto out_unmap_dist;
   2088	}
   2089
   2090	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
   2091		nr_redist_regions = 1;
   2092
   2093	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
   2094			     GFP_KERNEL);
   2095	if (!rdist_regs) {
   2096		err = -ENOMEM;
   2097		goto out_unmap_dist;
   2098	}
   2099
   2100	for (i = 0; i < nr_redist_regions; i++) {
   2101		rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
   2102		if (IS_ERR(rdist_regs[i].redist_base)) {
   2103			pr_err("%pOF: couldn't map region %d\n", node, i);
   2104			err = -ENODEV;
   2105			goto out_unmap_rdist;
   2106		}
   2107		rdist_regs[i].phys_base = res.start;
   2108	}
   2109
   2110	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
   2111		redist_stride = 0;
   2112
   2113	gic_enable_of_quirks(node, gic_quirks, &gic_data);
   2114
   2115	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
   2116			     redist_stride, &node->fwnode);
   2117	if (err)
   2118		goto out_unmap_rdist;
   2119
   2120	gic_populate_ppi_partitions(node);
   2121
   2122	if (static_branch_likely(&supports_deactivate_key))
   2123		gic_of_setup_kvm_info(node);
   2124	return 0;
   2125
   2126out_unmap_rdist:
   2127	for (i = 0; i < nr_redist_regions; i++)
   2128		if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
   2129			iounmap(rdist_regs[i].redist_base);
   2130	kfree(rdist_regs);
   2131out_unmap_dist:
   2132	iounmap(dist_base);
   2133	return err;
   2134}
   2135
   2136IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
   2137
   2138#ifdef CONFIG_ACPI
   2139static struct
   2140{
   2141	void __iomem *dist_base;
   2142	struct redist_region *redist_regs;
   2143	u32 nr_redist_regions;
   2144	bool single_redist;
   2145	int enabled_rdists;
   2146	u32 maint_irq;
   2147	int maint_irq_mode;
   2148	phys_addr_t vcpu_base;
   2149} acpi_data __initdata;
   2150
   2151static void __init
   2152gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
   2153{
   2154	static int count = 0;
   2155
   2156	acpi_data.redist_regs[count].phys_base = phys_base;
   2157	acpi_data.redist_regs[count].redist_base = redist_base;
   2158	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
   2159	count++;
   2160}
   2161
   2162static int __init
   2163gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
   2164			   const unsigned long end)
   2165{
   2166	struct acpi_madt_generic_redistributor *redist =
   2167			(struct acpi_madt_generic_redistributor *)header;
   2168	void __iomem *redist_base;
   2169
   2170	redist_base = ioremap(redist->base_address, redist->length);
   2171	if (!redist_base) {
   2172		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
   2173		return -ENOMEM;
   2174	}
   2175	gic_request_region(redist->base_address, redist->length, "GICR");
   2176
   2177	gic_acpi_register_redist(redist->base_address, redist_base);
   2178	return 0;
   2179}
   2180
   2181static int __init
   2182gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
   2183			 const unsigned long end)
   2184{
   2185	struct acpi_madt_generic_interrupt *gicc =
   2186				(struct acpi_madt_generic_interrupt *)header;
   2187	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
   2188	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
   2189	void __iomem *redist_base;
   2190
    2191	/* A GICC entry without ACPI_MADT_ENABLED is not usable, so skip it */
   2192	if (!(gicc->flags & ACPI_MADT_ENABLED))
   2193		return 0;
   2194
   2195	redist_base = ioremap(gicc->gicr_base_address, size);
   2196	if (!redist_base)
   2197		return -ENOMEM;
   2198	gic_request_region(gicc->gicr_base_address, size, "GICR");
   2199
   2200	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
   2201	return 0;
   2202}
   2203
   2204static int __init gic_acpi_collect_gicr_base(void)
   2205{
   2206	acpi_tbl_entry_handler redist_parser;
   2207	enum acpi_madt_type type;
   2208
   2209	if (acpi_data.single_redist) {
   2210		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
   2211		redist_parser = gic_acpi_parse_madt_gicc;
   2212	} else {
   2213		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
   2214		redist_parser = gic_acpi_parse_madt_redist;
   2215	}
   2216
   2217	/* Collect redistributor base addresses in GICR entries */
   2218	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
   2219		return 0;
   2220
   2221	pr_info("No valid GICR entries exist\n");
   2222	return -ENODEV;
   2223}
   2224
   2225static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
   2226				  const unsigned long end)
   2227{
    2228	/* Subtable presence means that redist exists; that's it */
   2229	return 0;
   2230}
   2231
   2232static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
   2233				      const unsigned long end)
   2234{
   2235	struct acpi_madt_generic_interrupt *gicc =
   2236				(struct acpi_madt_generic_interrupt *)header;
   2237
   2238	/*
    2239	 * If GICC is enabled and has a valid GICR base address, it means the
    2240	 * GICR base is presented via the GICC subtable.
   2241	 */
   2242	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
   2243		acpi_data.enabled_rdists++;
   2244		return 0;
   2245	}
   2246
   2247	/*
    2248	 * It's perfectly valid for firmware to pass a disabled GICC entry; don't
    2249	 * treat it as an error, just skip it instead of failing the probe.
   2250	 */
   2251	if (!(gicc->flags & ACPI_MADT_ENABLED))
   2252		return 0;
   2253
   2254	return -ENODEV;
   2255}
   2256
   2257static int __init gic_acpi_count_gicr_regions(void)
   2258{
   2259	int count;
   2260
   2261	/*
    2262	 * Count how many redistributor regions we have. Mixing redistributor
    2263	 * descriptions is not allowed: the GICR and GICC subtables have to be
    2264	 * mutually exclusive.
   2265	 */
   2266	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
   2267				      gic_acpi_match_gicr, 0);
   2268	if (count > 0) {
   2269		acpi_data.single_redist = false;
   2270		return count;
   2271	}
   2272
   2273	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
   2274				      gic_acpi_match_gicc, 0);
   2275	if (count > 0) {
   2276		acpi_data.single_redist = true;
   2277		count = acpi_data.enabled_rdists;
   2278	}
   2279
   2280	return count;
   2281}
   2282
   2283static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
   2284					   struct acpi_probe_entry *ape)
   2285{
   2286	struct acpi_madt_generic_distributor *dist;
   2287	int count;
   2288
   2289	dist = (struct acpi_madt_generic_distributor *)header;
   2290	if (dist->version != ape->driver_data)
   2291		return false;
   2292
    2293	/* We need to do that exercise anyway; the sooner the better */
   2294	count = gic_acpi_count_gicr_regions();
   2295	if (count <= 0)
   2296		return false;
   2297
   2298	acpi_data.nr_redist_regions = count;
   2299	return true;
   2300}
   2301
   2302static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
   2303						const unsigned long end)
   2304{
   2305	struct acpi_madt_generic_interrupt *gicc =
   2306		(struct acpi_madt_generic_interrupt *)header;
   2307	int maint_irq_mode;
    2308	static bool first_madt = true;
   2309
   2310	/* Skip unusable CPUs */
   2311	if (!(gicc->flags & ACPI_MADT_ENABLED))
   2312		return 0;
   2313
   2314	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
   2315		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
   2316
   2317	if (first_madt) {
   2318		first_madt = false;
   2319
   2320		acpi_data.maint_irq = gicc->vgic_interrupt;
   2321		acpi_data.maint_irq_mode = maint_irq_mode;
   2322		acpi_data.vcpu_base = gicc->gicv_base_address;
   2323
   2324		return 0;
   2325	}
   2326
   2327	/*
   2328	 * The maintenance interrupt and GICV should be the same for every CPU
   2329	 */
   2330	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
   2331	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
   2332	    (acpi_data.vcpu_base != gicc->gicv_base_address))
   2333		return -EINVAL;
   2334
   2335	return 0;
   2336}
   2337
   2338static bool __init gic_acpi_collect_virt_info(void)
   2339{
   2340	int count;
   2341
   2342	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
   2343				      gic_acpi_parse_virt_madt_gicc, 0);
   2344
   2345	return (count > 0);
   2346}
   2347
   2348#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
   2349#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
   2350#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)
   2351
   2352static void __init gic_acpi_setup_kvm_info(void)
   2353{
   2354	int irq;
   2355
   2356	if (!gic_acpi_collect_virt_info()) {
   2357		pr_warn("Unable to get hardware information used for virtualization\n");
   2358		return;
   2359	}
   2360
   2361	gic_v3_kvm_info.type = GIC_V3;
   2362
   2363	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
   2364				acpi_data.maint_irq_mode,
   2365				ACPI_ACTIVE_HIGH);
   2366	if (irq <= 0)
   2367		return;
   2368
   2369	gic_v3_kvm_info.maint_irq = irq;
   2370
   2371	if (acpi_data.vcpu_base) {
   2372		struct resource *vcpu = &gic_v3_kvm_info.vcpu;
   2373
   2374		vcpu->flags = IORESOURCE_MEM;
   2375		vcpu->start = acpi_data.vcpu_base;
   2376		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
   2377	}
   2378
   2379	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
   2380	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
   2381	vgic_set_kvm_info(&gic_v3_kvm_info);
   2382}
   2383
   2384static int __init
   2385gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
   2386{
   2387	struct acpi_madt_generic_distributor *dist;
   2388	struct fwnode_handle *domain_handle;
   2389	size_t size;
   2390	int i, err;
   2391
   2392	/* Get distributor base address */
   2393	dist = (struct acpi_madt_generic_distributor *)header;
   2394	acpi_data.dist_base = ioremap(dist->base_address,
   2395				      ACPI_GICV3_DIST_MEM_SIZE);
   2396	if (!acpi_data.dist_base) {
   2397		pr_err("Unable to map GICD registers\n");
   2398		return -ENOMEM;
   2399	}
   2400	gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
   2401
   2402	err = gic_validate_dist_version(acpi_data.dist_base);
   2403	if (err) {
   2404		pr_err("No distributor detected at @%p, giving up\n",
   2405		       acpi_data.dist_base);
   2406		goto out_dist_unmap;
   2407	}
   2408
   2409	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
   2410	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
   2411	if (!acpi_data.redist_regs) {
   2412		err = -ENOMEM;
   2413		goto out_dist_unmap;
   2414	}
   2415
   2416	err = gic_acpi_collect_gicr_base();
   2417	if (err)
   2418		goto out_redist_unmap;
   2419
   2420	domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
   2421	if (!domain_handle) {
   2422		err = -ENOMEM;
   2423		goto out_redist_unmap;
   2424	}
   2425
   2426	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
   2427			     acpi_data.nr_redist_regions, 0, domain_handle);
   2428	if (err)
   2429		goto out_fwhandle_free;
   2430
   2431	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
   2432
   2433	if (static_branch_likely(&supports_deactivate_key))
   2434		gic_acpi_setup_kvm_info();
   2435
   2436	return 0;
   2437
   2438out_fwhandle_free:
   2439	irq_domain_free_fwnode(domain_handle);
   2440out_redist_unmap:
   2441	for (i = 0; i < acpi_data.nr_redist_regions; i++)
   2442		if (acpi_data.redist_regs[i].redist_base)
   2443			iounmap(acpi_data.redist_regs[i].redist_base);
   2444	kfree(acpi_data.redist_regs);
   2445out_dist_unmap:
   2446	iounmap(acpi_data.dist_base);
   2447	return err;
   2448}
   2449IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
   2450		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
   2451		     gic_acpi_init);
   2452IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
   2453		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
   2454		     gic_acpi_init);
   2455IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
   2456		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
   2457		     gic_acpi_init);
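
/*
 * The ACPI_MADT_GIC_VERSION_* value above is stashed in ape->driver_data
 * and compared by acpi_validate_gic_table() against the MADT
 * distributor's version field; the GIC_VERSION_NONE variant instead
 * relies on gic_validate_dist_version() probing GICD_PIDR2.
 */
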
   2458#endif