cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

irq-mmp.c (15177B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 *  linux/arch/arm/mach-mmp/irq.c
      4 *
      5 *  Generic IRQ handling, GPIO IRQ demultiplexing, etc.
      6 *  Copyright (C) 2008 - 2012 Marvell Technology Group Ltd.
      7 *
      8 *  Author:	Bin Yang <bin.yang@marvell.com>
      9 *              Haojian Zhuang <haojian.zhuang@gmail.com>
     10 */
     11
     12#include <linux/module.h>
     13#include <linux/init.h>
     14#include <linux/irq.h>
     15#include <linux/irqchip.h>
     16#include <linux/irqchip/chained_irq.h>
     17#include <linux/irqdomain.h>
     18#include <linux/io.h>
     19#include <linux/ioport.h>
     20#include <linux/of_address.h>
     21#include <linux/of_irq.h>
     22
     23#include <asm/exception.h>
     24#include <asm/hardirq.h>
     25
/* Maximum number of ICU instances (one main ICU plus cascaded muxes). */
#define MAX_ICU_NR		16

/* Offsets of the "interrupt select" registers in the main ICU window. */
#define PJ1_INT_SEL		0x10c
#define PJ4_INT_SEL		0x104

/* bit fields in PJ1_INT_SEL and PJ4_INT_SEL */
#define SEL_INT_PENDING		(1 << 6)
#define SEL_INT_NUM_MASK	0x3f

/* Routing bits in the per-interrupt configuration registers on MMP2/MMP3. */
#define MMP2_ICU_INT_ROUTE_PJ4_IRQ	(1 << 5)
#define MMP2_ICU_INT_ROUTE_PJ4_FIQ	(1 << 6)
     37
/*
 * Per-ICU state.  icu_data[0] is the main ICU; any further entries are
 * secondary "mux" ICUs whose lines cascade into one main-ICU line.
 */
struct icu_chip_data {
	int			nr_irqs;	/* number of lines on this ICU */
	unsigned int		virq_base;	/* first Linux irq of this ICU */
	unsigned int		cascade_irq;	/* parent line (mux ICUs only) */
	void __iomem		*reg_status;	/* mux status register */
	void __iomem		*reg_mask;	/* mux mask register */
	unsigned int		conf_enable;	/* per-line conf when unmasked */
	unsigned int		conf_disable;	/* per-line conf when masked */
	unsigned int		conf_mask;	/* conf bits owned by the driver */
	unsigned int		conf2_mask;	/* ICU2 bits (MMP3 SMP only) */
	unsigned int		clr_mfp_irq_base; /* virq base of the PMIC mux */
	unsigned int		clr_mfp_hwirq;	/* PMIC line within that mux */
	struct irq_domain	*domain;	/* irq domain for this ICU */
};
     52
/* SoC-specific per-line configuration values, copied into icu_data[0]. */
struct mmp_intc_conf {
	unsigned int	conf_enable;	/* written to route/enable a line */
	unsigned int	conf_disable;	/* written when masking a line */
	unsigned int	conf_mask;	/* conf bits this driver may change */
	unsigned int	conf2_mask;	/* second-ICU bits (MMP3 only) */
};
     59
static void __iomem *mmp_icu_base;	/* main ICU register window */
static void __iomem *mmp_icu2_base;	/* second ICU bank (MMP3 only) */
static struct icu_chip_data icu_data[MAX_ICU_NR];
static int max_icu_nr;			/* number of used icu_data[] entries */

/* Defined in mach-mmp platform code (MMP2 PMIC interrupt workaround). */
extern void mmp2_clear_pmic_int(void);
     66
/*
 * Mask and acknowledge one interrupt line.
 *
 * Main ICU (icu_data[0]): each line has its own configuration register;
 * masking clears the driver-owned bits and writes the "disabled" value.
 * Mux ICUs: masking sets the line's bit in the shared mask register.
 */
static void icu_mask_ack_irq(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
	int hwirq;
	u32 r;

	/* Translate the Linux irq number back to the hardware line. */
	hwirq = d->irq - data->virq_base;
	if (data == &icu_data[0]) {
		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
		r &= ~data->conf_mask;
		r |= data->conf_disable;
		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
	} else {
#ifdef CONFIG_CPU_MMP2
		/*
		 * The MMP2 PMIC interrupt needs an extra clear step,
		 * applied only on the specific mux line carrying it.
		 */
		if ((data->virq_base == data->clr_mfp_irq_base)
			&& (hwirq == data->clr_mfp_hwirq))
			mmp2_clear_pmic_int();
#endif
		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
		writel_relaxed(r, data->reg_mask);
	}
}
     90
     91static void icu_mask_irq(struct irq_data *d)
     92{
     93	struct irq_domain *domain = d->domain;
     94	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
     95	int hwirq;
     96	u32 r;
     97
     98	hwirq = d->irq - data->virq_base;
     99	if (data == &icu_data[0]) {
    100		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
    101		r &= ~data->conf_mask;
    102		r |= data->conf_disable;
    103		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
    104
    105		if (data->conf2_mask) {
    106			/*
    107			 * ICU1 (above) only controls PJ4 MP1; if using SMP,
    108			 * we need to also mask the MP2 and MM cores via ICU2.
    109			 */
    110			r = readl_relaxed(mmp_icu2_base + (hwirq << 2));
    111			r &= ~data->conf2_mask;
    112			writel_relaxed(r, mmp_icu2_base + (hwirq << 2));
    113		}
    114	} else {
    115		r = readl_relaxed(data->reg_mask) | (1 << hwirq);
    116		writel_relaxed(r, data->reg_mask);
    117	}
    118}
    119
    120static void icu_unmask_irq(struct irq_data *d)
    121{
    122	struct irq_domain *domain = d->domain;
    123	struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data;
    124	int hwirq;
    125	u32 r;
    126
    127	hwirq = d->irq - data->virq_base;
    128	if (data == &icu_data[0]) {
    129		r = readl_relaxed(mmp_icu_base + (hwirq << 2));
    130		r &= ~data->conf_mask;
    131		r |= data->conf_enable;
    132		writel_relaxed(r, mmp_icu_base + (hwirq << 2));
    133	} else {
    134		r = readl_relaxed(data->reg_mask) & ~(1 << hwirq);
    135		writel_relaxed(r, data->reg_mask);
    136	}
    137}
    138
/* irq_chip shared by the main ICU and all cascaded mux ICUs. */
struct irq_chip icu_irq_chip = {
	.name		= "icu_irq",
	.irq_mask	= icu_mask_irq,
	.irq_mask_ack	= icu_mask_ack_irq,
	.irq_unmask	= icu_unmask_irq,
};
    145
/*
 * Chained handler for the secondary (mux) ICUs: identify which mux this
 * cascade irq belongs to, then dispatch every pending, unmasked line.
 */
static void icu_mux_irq_demux(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_domain *domain;
	struct icu_chip_data *data;
	int i;
	unsigned long mask, status, n;

	chained_irq_enter(chip, desc);

	/* icu_data[0] is the main ICU; only entries 1.. are muxes. */
	for (i = 1; i < max_icu_nr; i++) {
		if (irq == icu_data[i].cascade_irq) {
			domain = icu_data[i].domain;
			data = (struct icu_chip_data *)domain->host_data;
			break;
		}
	}
	if (i >= max_icu_nr) {
		pr_err("Spurious irq %d in MMP INTC\n", irq);
		goto out;
	}

	/* Re-read status until no unmasked line remains pending. */
	mask = readl_relaxed(data->reg_mask);
	while (1) {
		status = readl_relaxed(data->reg_status) & ~mask;
		if (status == 0)
			break;
		for_each_set_bit(n, &status, BITS_PER_LONG) {
			generic_handle_irq(icu_data[i].virq_base + n);
		}
	}

out:
	chained_irq_exit(chip, desc);
}
    182
/*
 * irq_domain .map callback: every line on this ICU is a level-triggered
 * interrupt handled by icu_irq_chip.
 */
static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
	return 0;
}
    189
/*
 * irq_domain .xlate callback: single-cell specifier where the cell is
 * the hwirq number.
 * NOTE(review): *out_type is never written here, so the caller's
 * initial trigger type is kept — confirm against the DT bindings.
 */
static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	*out_hwirq = intspec[0];
	return 0;
}
    198
/* Domain ops used by the DT probe paths (legacy init uses simple ops). */
static const struct irq_domain_ops mmp_irq_domain_ops = {
	.map		= mmp_irq_domain_map,
	.xlate		= mmp_irq_domain_xlate,
};
    203
/* Per-line configuration values for the ARMv5 MMP (PJ1) ICU. */
static const struct mmp_intc_conf mmp_conf = {
	.conf_enable	= 0x51,
	.conf_disable	= 0x0,
	.conf_mask	= 0x7f,
};
    209
/* Per-line configuration for MMP2: route enabled lines to the PJ4 core. */
static const struct mmp_intc_conf mmp2_conf = {
	.conf_enable	= 0x20,
	.conf_disable	= 0x0,
	.conf_mask	= MMP2_ICU_INT_ROUTE_PJ4_IRQ |
			  MMP2_ICU_INT_ROUTE_PJ4_FIQ,
};
    216
    217static struct mmp_intc_conf mmp3_conf = {
    218	.conf_enable	= 0x20,
    219	.conf_disable	= 0x0,
    220	.conf_mask	= MMP2_ICU_INT_ROUTE_PJ4_IRQ |
    221			  MMP2_ICU_INT_ROUTE_PJ4_FIQ,
    222	.conf2_mask	= 0xf0,
    223};
    224
    225static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
    226{
    227	int hwirq;
    228
    229	hwirq = readl_relaxed(mmp_icu_base + PJ1_INT_SEL);
    230	if (!(hwirq & SEL_INT_PENDING))
    231		return;
    232	hwirq &= SEL_INT_NUM_MASK;
    233	generic_handle_domain_irq(icu_data[0].domain, hwirq);
    234}
    235
    236static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
    237{
    238	int hwirq;
    239
    240	hwirq = readl_relaxed(mmp_icu_base + PJ4_INT_SEL);
    241	if (!(hwirq & SEL_INT_PENDING))
    242		return;
    243	hwirq &= SEL_INT_NUM_MASK;
    244	generic_handle_domain_irq(icu_data[0].domain, hwirq);
    245}
    246
/* MMP (ARMv5) */
/*
 * Legacy (non-DT) init for the single ARMv5 MMP ICU: map the register
 * window, create a legacy domain for 64 lines, mask everything, and
 * install the level handler plus the top-level entry point.
 */
void __init icu_init_irq(void)
{
	int irq;

	max_icu_nr = 1;
	/* Fixed physical address of the ICU on these SoCs. */
	/* NOTE(review): ioremap() result is not checked — boot-time
	 * code here assumes it succeeds. */
	mmp_icu_base = ioremap(0xd4282000, 0x1000);
	icu_data[0].conf_enable = mmp_conf.conf_enable;
	icu_data[0].conf_disable = mmp_conf.conf_disable;
	icu_data[0].conf_mask = mmp_conf.conf_mask;
	icu_data[0].nr_irqs = 64;
	icu_data[0].virq_base = 0;
	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
						   &irq_domain_simple_ops,
						   &icu_data[0]);
	/* Start with every line masked; drivers unmask on request. */
	for (irq = 0; irq < 64; irq++) {
		icu_mask_irq(irq_get_irq_data(irq));
		irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
	}
	irq_set_default_host(icu_data[0].domain);
	set_handle_irq(mmp_handle_irq);
}
    269
    270/* MMP2 (ARMv7) */
    271void __init mmp2_init_icu(void)
    272{
    273	int irq, end;
    274
    275	max_icu_nr = 8;
    276	mmp_icu_base = ioremap(0xd4282000, 0x1000);
    277	icu_data[0].conf_enable = mmp2_conf.conf_enable;
    278	icu_data[0].conf_disable = mmp2_conf.conf_disable;
    279	icu_data[0].conf_mask = mmp2_conf.conf_mask;
    280	icu_data[0].nr_irqs = 64;
    281	icu_data[0].virq_base = 0;
    282	icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0,
    283						   &irq_domain_simple_ops,
    284						   &icu_data[0]);
    285	icu_data[1].reg_status = mmp_icu_base + 0x150;
    286	icu_data[1].reg_mask = mmp_icu_base + 0x168;
    287	icu_data[1].clr_mfp_irq_base = icu_data[0].virq_base +
    288				icu_data[0].nr_irqs;
    289	icu_data[1].clr_mfp_hwirq = 1;		/* offset to IRQ_MMP2_PMIC_BASE */
    290	icu_data[1].nr_irqs = 2;
    291	icu_data[1].cascade_irq = 4;
    292	icu_data[1].virq_base = icu_data[0].virq_base + icu_data[0].nr_irqs;
    293	icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
    294						   icu_data[1].virq_base, 0,
    295						   &irq_domain_simple_ops,
    296						   &icu_data[1]);
    297	icu_data[2].reg_status = mmp_icu_base + 0x154;
    298	icu_data[2].reg_mask = mmp_icu_base + 0x16c;
    299	icu_data[2].nr_irqs = 2;
    300	icu_data[2].cascade_irq = 5;
    301	icu_data[2].virq_base = icu_data[1].virq_base + icu_data[1].nr_irqs;
    302	icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
    303						   icu_data[2].virq_base, 0,
    304						   &irq_domain_simple_ops,
    305						   &icu_data[2]);
    306	icu_data[3].reg_status = mmp_icu_base + 0x180;
    307	icu_data[3].reg_mask = mmp_icu_base + 0x17c;
    308	icu_data[3].nr_irqs = 3;
    309	icu_data[3].cascade_irq = 9;
    310	icu_data[3].virq_base = icu_data[2].virq_base + icu_data[2].nr_irqs;
    311	icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
    312						   icu_data[3].virq_base, 0,
    313						   &irq_domain_simple_ops,
    314						   &icu_data[3]);
    315	icu_data[4].reg_status = mmp_icu_base + 0x158;
    316	icu_data[4].reg_mask = mmp_icu_base + 0x170;
    317	icu_data[4].nr_irqs = 5;
    318	icu_data[4].cascade_irq = 17;
    319	icu_data[4].virq_base = icu_data[3].virq_base + icu_data[3].nr_irqs;
    320	icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
    321						   icu_data[4].virq_base, 0,
    322						   &irq_domain_simple_ops,
    323						   &icu_data[4]);
    324	icu_data[5].reg_status = mmp_icu_base + 0x15c;
    325	icu_data[5].reg_mask = mmp_icu_base + 0x174;
    326	icu_data[5].nr_irqs = 15;
    327	icu_data[5].cascade_irq = 35;
    328	icu_data[5].virq_base = icu_data[4].virq_base + icu_data[4].nr_irqs;
    329	icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
    330						   icu_data[5].virq_base, 0,
    331						   &irq_domain_simple_ops,
    332						   &icu_data[5]);
    333	icu_data[6].reg_status = mmp_icu_base + 0x160;
    334	icu_data[6].reg_mask = mmp_icu_base + 0x178;
    335	icu_data[6].nr_irqs = 2;
    336	icu_data[6].cascade_irq = 51;
    337	icu_data[6].virq_base = icu_data[5].virq_base + icu_data[5].nr_irqs;
    338	icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
    339						   icu_data[6].virq_base, 0,
    340						   &irq_domain_simple_ops,
    341						   &icu_data[6]);
    342	icu_data[7].reg_status = mmp_icu_base + 0x188;
    343	icu_data[7].reg_mask = mmp_icu_base + 0x184;
    344	icu_data[7].nr_irqs = 2;
    345	icu_data[7].cascade_irq = 55;
    346	icu_data[7].virq_base = icu_data[6].virq_base + icu_data[6].nr_irqs;
    347	icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
    348						   icu_data[7].virq_base, 0,
    349						   &irq_domain_simple_ops,
    350						   &icu_data[7]);
    351	end = icu_data[7].virq_base + icu_data[7].nr_irqs;
    352	for (irq = 0; irq < end; irq++) {
    353		icu_mask_irq(irq_get_irq_data(irq));
    354		if (irq == icu_data[1].cascade_irq ||
    355		    irq == icu_data[2].cascade_irq ||
    356		    irq == icu_data[3].cascade_irq ||
    357		    irq == icu_data[4].cascade_irq ||
    358		    irq == icu_data[5].cascade_irq ||
    359		    irq == icu_data[6].cascade_irq ||
    360		    irq == icu_data[7].cascade_irq) {
    361			irq_set_chip(irq, &icu_irq_chip);
    362			irq_set_chained_handler(irq, icu_mux_irq_demux);
    363		} else {
    364			irq_set_chip_and_handler(irq, &icu_irq_chip,
    365						 handle_level_irq);
    366		}
    367	}
    368	irq_set_default_host(icu_data[0].domain);
    369	set_handle_irq(mmp2_handle_irq);
    370}
    371
    372#ifdef CONFIG_OF
    373static int __init mmp_init_bases(struct device_node *node)
    374{
    375	int ret, nr_irqs, irq, i = 0;
    376
    377	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs", &nr_irqs);
    378	if (ret) {
    379		pr_err("Not found mrvl,intc-nr-irqs property\n");
    380		return ret;
    381	}
    382
    383	mmp_icu_base = of_iomap(node, 0);
    384	if (!mmp_icu_base) {
    385		pr_err("Failed to get interrupt controller register\n");
    386		return -ENOMEM;
    387	}
    388
    389	icu_data[0].virq_base = 0;
    390	icu_data[0].domain = irq_domain_add_linear(node, nr_irqs,
    391						   &mmp_irq_domain_ops,
    392						   &icu_data[0]);
    393	for (irq = 0; irq < nr_irqs; irq++) {
    394		ret = irq_create_mapping(icu_data[0].domain, irq);
    395		if (!ret) {
    396			pr_err("Failed to mapping hwirq\n");
    397			goto err;
    398		}
    399		if (!irq)
    400			icu_data[0].virq_base = ret;
    401	}
    402	icu_data[0].nr_irqs = nr_irqs;
    403	return 0;
    404err:
    405	if (icu_data[0].virq_base) {
    406		for (i = 0; i < irq; i++)
    407			irq_dispose_mapping(icu_data[0].virq_base + i);
    408	}
    409	irq_domain_remove(icu_data[0].domain);
    410	iounmap(mmp_icu_base);
    411	return -EINVAL;
    412}
    413
    414static int __init mmp_of_init(struct device_node *node,
    415			      struct device_node *parent)
    416{
    417	int ret;
    418
    419	ret = mmp_init_bases(node);
    420	if (ret < 0)
    421		return ret;
    422
    423	icu_data[0].conf_enable = mmp_conf.conf_enable;
    424	icu_data[0].conf_disable = mmp_conf.conf_disable;
    425	icu_data[0].conf_mask = mmp_conf.conf_mask;
    426	set_handle_irq(mmp_handle_irq);
    427	max_icu_nr = 1;
    428	return 0;
    429}
    430IRQCHIP_DECLARE(mmp_intc, "mrvl,mmp-intc", mmp_of_init);
    431
    432static int __init mmp2_of_init(struct device_node *node,
    433			       struct device_node *parent)
    434{
    435	int ret;
    436
    437	ret = mmp_init_bases(node);
    438	if (ret < 0)
    439		return ret;
    440
    441	icu_data[0].conf_enable = mmp2_conf.conf_enable;
    442	icu_data[0].conf_disable = mmp2_conf.conf_disable;
    443	icu_data[0].conf_mask = mmp2_conf.conf_mask;
    444	set_handle_irq(mmp2_handle_irq);
    445	max_icu_nr = 1;
    446	return 0;
    447}
    448IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init);
    449
/* DT probe for the MMP3 interrupt controller (two ICU banks). */
static int __init mmp3_of_init(struct device_node *node,
			       struct device_node *parent)
{
	int ret;

	/* MMP3 has a second ICU bank controlling the other cores. */
	mmp_icu2_base = of_iomap(node, 1);
	if (!mmp_icu2_base) {
		pr_err("Failed to get interrupt controller register #2\n");
		return -ENODEV;
	}

	ret = mmp_init_bases(node);
	if (ret < 0) {
		iounmap(mmp_icu2_base);
		return ret;
	}

	icu_data[0].conf_enable = mmp3_conf.conf_enable;
	icu_data[0].conf_disable = mmp3_conf.conf_disable;
	icu_data[0].conf_mask = mmp3_conf.conf_mask;
	/* Non-zero conf2_mask enables ICU2 masking in icu_mask_irq(). */
	icu_data[0].conf2_mask = mmp3_conf.conf2_mask;

	if (!parent) {
		/* This is the main interrupt controller. */
		set_handle_irq(mmp2_handle_irq);
	}

	max_icu_nr = 1;
	return 0;
}
IRQCHIP_DECLARE(mmp3_intc, "marvell,mmp3-intc", mmp3_of_init);
    481
    482static int __init mmp2_mux_of_init(struct device_node *node,
    483				   struct device_node *parent)
    484{
    485	int i, ret, irq, j = 0;
    486	u32 nr_irqs, mfp_irq;
    487	u32 reg[4];
    488
    489	if (!parent)
    490		return -ENODEV;
    491
    492	i = max_icu_nr;
    493	ret = of_property_read_u32(node, "mrvl,intc-nr-irqs",
    494				   &nr_irqs);
    495	if (ret) {
    496		pr_err("Not found mrvl,intc-nr-irqs property\n");
    497		return -EINVAL;
    498	}
    499
    500	/*
    501	 * For historical reasons, the "regs" property of the
    502	 * mrvl,mmp2-mux-intc is not a regular "regs" property containing
    503	 * addresses on the parent bus, but offsets from the intc's base.
    504	 * That is why we can't use of_address_to_resource() here.
    505	 */
    506	ret = of_property_read_variable_u32_array(node, "reg", reg,
    507						  ARRAY_SIZE(reg),
    508						  ARRAY_SIZE(reg));
    509	if (ret < 0) {
    510		pr_err("Not found reg property\n");
    511		return -EINVAL;
    512	}
    513	icu_data[i].reg_status = mmp_icu_base + reg[0];
    514	icu_data[i].reg_mask = mmp_icu_base + reg[2];
    515	icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0);
    516	if (!icu_data[i].cascade_irq)
    517		return -EINVAL;
    518
    519	icu_data[i].virq_base = 0;
    520	icu_data[i].domain = irq_domain_add_linear(node, nr_irqs,
    521						   &mmp_irq_domain_ops,
    522						   &icu_data[i]);
    523	for (irq = 0; irq < nr_irqs; irq++) {
    524		ret = irq_create_mapping(icu_data[i].domain, irq);
    525		if (!ret) {
    526			pr_err("Failed to mapping hwirq\n");
    527			goto err;
    528		}
    529		if (!irq)
    530			icu_data[i].virq_base = ret;
    531	}
    532	icu_data[i].nr_irqs = nr_irqs;
    533	if (!of_property_read_u32(node, "mrvl,clr-mfp-irq",
    534				  &mfp_irq)) {
    535		icu_data[i].clr_mfp_irq_base = icu_data[i].virq_base;
    536		icu_data[i].clr_mfp_hwirq = mfp_irq;
    537	}
    538	irq_set_chained_handler(icu_data[i].cascade_irq,
    539				icu_mux_irq_demux);
    540	max_icu_nr++;
    541	return 0;
    542err:
    543	if (icu_data[i].virq_base) {
    544		for (j = 0; j < irq; j++)
    545			irq_dispose_mapping(icu_data[i].virq_base + j);
    546	}
    547	irq_domain_remove(icu_data[i].domain);
    548	return -EINVAL;
    549}
    550IRQCHIP_DECLARE(mmp2_mux_intc, "mrvl,mmp2-mux-intc", mmp2_mux_of_init);
    551#endif