cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

octeon-irq.c (75273B)


      1/*
      2 * This file is subject to the terms and conditions of the GNU General Public
      3 * License.  See the file "COPYING" in the main directory of this archive
      4 * for more details.
      5 *
      6 * Copyright (C) 2004-2016 Cavium, Inc.
      7 */
      8
      9#include <linux/of_address.h>
     10#include <linux/interrupt.h>
     11#include <linux/irqdomain.h>
     12#include <linux/bitops.h>
     13#include <linux/of_irq.h>
     14#include <linux/percpu.h>
     15#include <linux/slab.h>
     16#include <linux/irq.h>
     17#include <linux/smp.h>
     18#include <linux/of.h>
     19
     20#include <asm/octeon/octeon.h>
     21#include <asm/octeon/cvmx-ciu2-defs.h>
     22#include <asm/octeon/cvmx-ciu3-defs.h>
     23
     24static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
     25static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
     26static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
     27static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);
     28
     29static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
     30static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);
     31#define CIU3_MBOX_PER_CORE 10
     32
     33/*
     34 * The 8 most significant bits of the intsn identify the interrupt major block.
     35 * Each major block might use its own interrupt domain. Thus 256 domains are
     36 * needed.
     37 */
     38#define MAX_CIU3_DOMAINS		256
     39
     40typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);
     41
     42/* Information for each ciu3 in the system */
     43struct octeon_ciu3_info {
     44	u64			ciu3_addr;
     45	int			node;
     46	struct irq_domain	*domain[MAX_CIU3_DOMAINS];
     47	octeon_ciu3_intsn2hw_t	intsn2hw[MAX_CIU3_DOMAINS];
     48};
     49
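/*
 * A minimal sketch (hypothetical helper, not part of the original file):
 * assuming the 20-bit intsn layout this driver uses for CIU3, the
 * interrupt major block in the 8 most significant bits selects the
 * per-block irq domain.
 */
static inline struct irq_domain *example_ciu3_block_domain(
	struct octeon_ciu3_info *ciu3_info, unsigned int intsn)
{
	unsigned int block = intsn >> 12;	/* top 8 bits of a 20-bit intsn */

	return ciu3_info->domain[block];
}
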
     50/* Each ciu3 in the system uses its own data (one ciu3 per node) */
     51static struct octeon_ciu3_info	*octeon_ciu3_info_per_node[4];
     52
     53struct octeon_irq_ciu_domain_data {
     54	int num_sum;  /* number of sum registers (2 or 3). */
     55};
     56
     57/* Register offsets from ciu3_addr */
     58#define CIU3_CONST		0x220
     59#define CIU3_IDT_CTL(_idt)	((_idt) * 8 + 0x110000)
     60#define CIU3_IDT_PP(_idt, _idx)	((_idt) * 32 + (_idx) * 8 + 0x120000)
     61#define CIU3_IDT_IO(_idt)	((_idt) * 8 + 0x130000)
     62#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
     63#define CIU3_DEST_IO_INT(_io)	((_io) * 8 + 0x210000)
     64#define CIU3_ISC_CTL(_intsn)	((_intsn) * 8 + 0x80000000)
     65#define CIU3_ISC_W1C(_intsn)	((_intsn) * 8 + 0x90000000)
     66#define CIU3_ISC_W1S(_intsn)	((_intsn) * 8 + 0xa0000000)
     67
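/*
 * Illustrative sketch (hypothetical helper): a full CSR address is the
 * node's ciu3_addr plus one of the offsets above, e.g. the W1C register
 * used to clear a given intsn.
 */
static inline u64 example_ciu3_isc_w1c_addr(
	struct octeon_ciu3_info *ciu3_info, unsigned int intsn)
{
	return ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
}
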
     68static __read_mostly int octeon_irq_ciu_to_irq[8][64];
     69
     70struct octeon_ciu_chip_data {
     71	union {
     72		struct {		/* only used for ciu3 */
     73			u64 ciu3_addr;
     74			unsigned int intsn;
     75		};
     76		struct {		/* only used for ciu/ciu2 */
     77			u8 line;
     78			u8 bit;
     79		};
     80	};
     81	int gpio_line;
     82	int current_cpu;	/* Next CPU expected to take this irq */
     83	int ciu_node; /* NUMA node number of the CIU */
     84};
     85
     86struct octeon_core_chip_data {
     87	struct mutex core_irq_mutex;
     88	bool current_en;
     89	bool desired_en;
     90	u8 bit;
     91};
     92
     93#define MIPS_CORE_IRQ_LINES 8
     94
     95static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
     96
     97static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
     98				      struct irq_chip *chip,
     99				      irq_flow_handler_t handler)
    100{
    101	struct octeon_ciu_chip_data *cd;
    102
    103	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
    104	if (!cd)
    105		return -ENOMEM;
    106
    107	irq_set_chip_and_handler(irq, chip, handler);
    108
    109	cd->line = line;
    110	cd->bit = bit;
    111	cd->gpio_line = gpio_line;
    112
    113	irq_set_chip_data(irq, cd);
    114	octeon_irq_ciu_to_irq[line][bit] = irq;
    115	return 0;
    116}
    117
    118static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
    119{
    120	struct irq_data *data = irq_get_irq_data(irq);
    121	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
    122
    123	irq_set_chip_data(irq, NULL);
    124	kfree(cd);
    125}
    126
    127static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
    128					int irq, int line, int bit)
    129{
    130	return irq_domain_associate(domain, irq, line << 6 | bit);
    131}
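
/*
 * Hypothetical helper, for illustration only: hwirq numbers in the ciu
 * domain pack the sum line and bit as (line << 6) | bit, which
 * octeon_irq_ciu_map() splits apart again further down.
 */
static inline irq_hw_number_t example_ciu_hwirq(unsigned int line,
						unsigned int bit)
{
	return (line << 6) | bit;	/* e.g. line 1, bit 46 -> hwirq 110 */
}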
    132
    133static int octeon_coreid_for_cpu(int cpu)
    134{
    135#ifdef CONFIG_SMP
    136	return cpu_logical_map(cpu);
    137#else
    138	return cvmx_get_core_num();
    139#endif
    140}
    141
    142static int octeon_cpu_for_coreid(int coreid)
    143{
    144#ifdef CONFIG_SMP
    145	return cpu_number_map(coreid);
    146#else
    147	return smp_processor_id();
    148#endif
    149}
    150
    151static void octeon_irq_core_ack(struct irq_data *data)
    152{
    153	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
    154	unsigned int bit = cd->bit;
    155
    156	/*
    157	 * We don't need to disable IRQs to make these atomic since
    158	 * they are already disabled earlier in the low level
    159	 * interrupt code.
    160	 */
    161	clear_c0_status(0x100 << bit);
    162	/* The two user interrupts must be cleared manually. */
    163	if (bit < 2)
    164		clear_c0_cause(0x100 << bit);
    165}
    166
    167static void octeon_irq_core_eoi(struct irq_data *data)
    168{
    169	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
    170
    171	/*
    172	 * We don't need to disable IRQs to make these atomic since
    173	 * they are already disabled earlier in the low level
    174	 * interrupt code.
    175	 */
    176	set_c0_status(0x100 << cd->bit);
    177}
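
/*
 * Sketch of what the "0x100 << bit" masks above address: the CP0
 * Status.IM / Cause.IP fields start at bit 8, so bit 2 (the first
 * hardware interrupt line) maps to mask 0x400, i.e. STATUSF_IP2.
 * Hypothetical helper:
 */
static inline unsigned int example_core_irq_mask(unsigned int bit)
{
	return 0x100 << bit;	/* IM/IP fields begin at Status bit 8 */
}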
    178
    179static void octeon_irq_core_set_enable_local(void *arg)
    180{
    181	struct irq_data *data = arg;
    182	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
    183	unsigned int mask = 0x100 << cd->bit;
    184
    185	/*
    186	 * Interrupts are already disabled, so these are atomic.
    187	 */
    188	if (cd->desired_en)
    189		set_c0_status(mask);
    190	else
    191		clear_c0_status(mask);
    192
    193}
    194
    195static void octeon_irq_core_disable(struct irq_data *data)
    196{
    197	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
    198	cd->desired_en = false;
    199}
    200
    201static void octeon_irq_core_enable(struct irq_data *data)
    202{
    203	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
    204	cd->desired_en = true;
    205}
    206
    207static void octeon_irq_core_bus_lock(struct irq_data *data)
    208{
    209	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
    210
    211	mutex_lock(&cd->core_irq_mutex);
    212}
    213
    214static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
    215{
    216	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
    217
    218	if (cd->desired_en != cd->current_en) {
    219		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);
    220
    221		cd->current_en = cd->desired_en;
    222	}
    223
    224	mutex_unlock(&cd->core_irq_mutex);
    225}
    226
    227static struct irq_chip octeon_irq_chip_core = {
    228	.name = "Core",
    229	.irq_enable = octeon_irq_core_enable,
    230	.irq_disable = octeon_irq_core_disable,
    231	.irq_ack = octeon_irq_core_ack,
    232	.irq_eoi = octeon_irq_core_eoi,
    233	.irq_bus_lock = octeon_irq_core_bus_lock,
    234	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,
    235
    236	.irq_cpu_online = octeon_irq_core_eoi,
    237	.irq_cpu_offline = octeon_irq_core_ack,
    238	.flags = IRQCHIP_ONOFFLINE_ENABLED,
    239};
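
/*
 * Illustrative sequence (hypothetical helper; normally the genirq core
 * drives these callbacks): enable/disable only record the desired
 * state, and the cross-CPU broadcast happens once, in bus_sync_unlock.
 */
static inline void example_core_irq_update(struct irq_data *data, bool enable)
{
	octeon_irq_core_bus_lock(data);		/* take core_irq_mutex */
	if (enable)
		octeon_irq_core_enable(data);	/* desired_en = true */
	else
		octeon_irq_core_disable(data);	/* desired_en = false */
	octeon_irq_core_bus_sync_unlock(data);	/* on_each_cpu() if changed */
}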
    240
    241static void __init octeon_irq_init_core(void)
    242{
    243	int i;
    244	int irq;
    245	struct octeon_core_chip_data *cd;
    246
    247	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
    248		cd = &octeon_irq_core_chip_data[i];
    249		cd->current_en = false;
    250		cd->desired_en = false;
    251		cd->bit = i;
    252		mutex_init(&cd->core_irq_mutex);
    253
    254		irq = OCTEON_IRQ_SW0 + i;
    255		irq_set_chip_data(irq, cd);
    256		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
    257					 handle_percpu_irq);
    258	}
    259}
    260
    261static int next_cpu_for_irq(struct irq_data *data)
    262{
    263
    264#ifdef CONFIG_SMP
    265	int cpu;
    266	struct cpumask *mask = irq_data_get_affinity_mask(data);
    267	int weight = cpumask_weight(mask);
    268	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
    269
    270	if (weight > 1) {
    271		cpu = cd->current_cpu;
    272		for (;;) {
    273			cpu = cpumask_next(cpu, mask);
    274			if (cpu >= nr_cpu_ids) {
    275				cpu = -1;
    276				continue;
    277			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
    278				break;
    279			}
    280		}
    281	} else if (weight == 1) {
    282		cpu = cpumask_first(mask);
    283	} else {
    284		cpu = smp_processor_id();
    285	}
    286	cd->current_cpu = cpu;
    287	return cpu;
    288#else
    289	return smp_processor_id();
    290#endif
    291}
    292
    293static void octeon_irq_ciu_enable(struct irq_data *data)
    294{
    295	int cpu = next_cpu_for_irq(data);
    296	int coreid = octeon_coreid_for_cpu(cpu);
    297	unsigned long *pen;
    298	unsigned long flags;
    299	struct octeon_ciu_chip_data *cd;
    300	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
    301
    302	cd = irq_data_get_irq_chip_data(data);
    303
    304	raw_spin_lock_irqsave(lock, flags);
    305	if (cd->line == 0) {
    306		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
    307		__set_bit(cd->bit, pen);
    308		/*
    309		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
    310		 * enabling the irq.
    311		 */
    312		wmb();
    313		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
    314	} else {
    315		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
    316		__set_bit(cd->bit, pen);
    317		/*
    318		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
    319		 * enabling the irq.
    320		 */
    321		wmb();
    322		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
    323	}
    324	raw_spin_unlock_irqrestore(lock, flags);
    325}
    326
    327static void octeon_irq_ciu_enable_local(struct irq_data *data)
    328{
    329	unsigned long *pen;
    330	unsigned long flags;
    331	struct octeon_ciu_chip_data *cd;
    332	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
    333
    334	cd = irq_data_get_irq_chip_data(data);
    335
    336	raw_spin_lock_irqsave(lock, flags);
    337	if (cd->line == 0) {
    338		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
    339		__set_bit(cd->bit, pen);
    340		/*
    341		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
    342		 * enabling the irq.
    343		 */
    344		wmb();
    345		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
    346	} else {
    347		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
    348		__set_bit(cd->bit, pen);
    349		/*
    350		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
    351		 * enabling the irq.
    352		 */
    353		wmb();
    354		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
    355	}
    356	raw_spin_unlock_irqrestore(lock, flags);
    357}
    358
    359static void octeon_irq_ciu_disable_local(struct irq_data *data)
    360{
    361	unsigned long *pen;
    362	unsigned long flags;
    363	struct octeon_ciu_chip_data *cd;
    364	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
    365
    366	cd = irq_data_get_irq_chip_data(data);
    367
    368	raw_spin_lock_irqsave(lock, flags);
    369	if (cd->line == 0) {
    370		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
    371		__clear_bit(cd->bit, pen);
    372		/*
    373		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
    374		 * disabling the irq.
    375		 */
    376		wmb();
    377		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
    378	} else {
    379		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
    380		__clear_bit(cd->bit, pen);
    381		/*
    382		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
    383		 * disabling the irq.
    384		 */
    385		wmb();
    386		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
    387	}
    388	raw_spin_unlock_irqrestore(lock, flags);
    389}
    390
    391static void octeon_irq_ciu_disable_all(struct irq_data *data)
    392{
    393	unsigned long flags;
    394	unsigned long *pen;
    395	int cpu;
    396	struct octeon_ciu_chip_data *cd;
    397	raw_spinlock_t *lock;
    398
    399	cd = irq_data_get_irq_chip_data(data);
    400
    401	for_each_online_cpu(cpu) {
    402		int coreid = octeon_coreid_for_cpu(cpu);
    403		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
    404		if (cd->line == 0)
    405			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
    406		else
    407			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
    408
    409		raw_spin_lock_irqsave(lock, flags);
    410		__clear_bit(cd->bit, pen);
    411		/*
    412		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
    413		 * disabling the irq.
    414		 */
    415		wmb();
    416		if (cd->line == 0)
    417			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
    418		else
    419			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
    420		raw_spin_unlock_irqrestore(lock, flags);
    421	}
    422}
    423
    424static void octeon_irq_ciu_enable_all(struct irq_data *data)
    425{
    426	unsigned long flags;
    427	unsigned long *pen;
    428	int cpu;
    429	struct octeon_ciu_chip_data *cd;
    430	raw_spinlock_t *lock;
    431
    432	cd = irq_data_get_irq_chip_data(data);
    433
    434	for_each_online_cpu(cpu) {
    435		int coreid = octeon_coreid_for_cpu(cpu);
    436		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
    437		if (cd->line == 0)
    438			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
    439		else
    440			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
    441
    442		raw_spin_lock_irqsave(lock, flags);
    443		__set_bit(cd->bit, pen);
    444		/*
    445		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
    446		 * enabling the irq.
    447		 */
    448		wmb();
    449		if (cd->line == 0)
    450			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
    451		else
    452			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
    453		raw_spin_unlock_irqrestore(lock, flags);
    454	}
    455}
    456
    457/*
    458 * Enable the irq on the next core in the affinity set for chips that
    459 * have the EN*_W1{S,C} registers.
    460 */
    461static void octeon_irq_ciu_enable_v2(struct irq_data *data)
    462{
    463	u64 mask;
    464	int cpu = next_cpu_for_irq(data);
    465	struct octeon_ciu_chip_data *cd;
    466
    467	cd = irq_data_get_irq_chip_data(data);
    468	mask = 1ull << (cd->bit);
    469
    470	/*
    471	 * Called under the desc lock, so these should never get out
    472	 * of sync.
    473	 */
    474	if (cd->line == 0) {
    475		int index = octeon_coreid_for_cpu(cpu) * 2;
    476		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
    477		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
    478	} else {
    479		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
    480		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
    481		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
    482	}
    483}
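
/*
 * Sketch of why no lock is needed above: the EN*_W1S/EN*_W1C registers
 * set or clear exactly the bits written, so a single store replaces the
 * read-modify-write under octeon_irq_ciu_spinlock that the non-v2
 * variants perform. Hypothetical helper:
 */
static inline void example_en0_set_bit_lockless(int index, int bit)
{
	/* only "bit" is set; all other enable bits are left untouched */
	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), 1ull << bit);
}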
    484
    485/*
    486 * Enable the irq in the sum2 registers.
    487 */
    488static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
    489{
    490	u64 mask;
    491	int cpu = next_cpu_for_irq(data);
    492	int index = octeon_coreid_for_cpu(cpu);
    493	struct octeon_ciu_chip_data *cd;
    494
    495	cd = irq_data_get_irq_chip_data(data);
    496	mask = 1ull << (cd->bit);
    497
    498	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
    499}
    500
    501/*
    502 * Disable the irq in the sum2 registers.
    503 */
    504static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
    505{
    506	u64 mask;
    507	int cpu = next_cpu_for_irq(data);
    508	int index = octeon_coreid_for_cpu(cpu);
    509	struct octeon_ciu_chip_data *cd;
    510
    511	cd = irq_data_get_irq_chip_data(data);
    512	mask = 1ull << (cd->bit);
    513
    514	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
    515}
    516
    517static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
    518{
    519	u64 mask;
    520	int cpu = next_cpu_for_irq(data);
    521	int index = octeon_coreid_for_cpu(cpu);
    522	struct octeon_ciu_chip_data *cd;
    523
    524	cd = irq_data_get_irq_chip_data(data);
    525	mask = 1ull << (cd->bit);
    526
    527	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
    528}
    529
    530static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
    531{
    532	int cpu;
    533	struct octeon_ciu_chip_data *cd;
    534	u64 mask;
    535
    536	cd = irq_data_get_irq_chip_data(data);
    537	mask = 1ull << (cd->bit);
    538
    539	for_each_online_cpu(cpu) {
    540		int coreid = octeon_coreid_for_cpu(cpu);
    541
    542		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
    543	}
    544}
    545
    546/*
    547 * Enable the irq on the current CPU for chips that
    548 * have the EN*_W1{S,C} registers.
    549 */
    550static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
    551{
    552	u64 mask;
    553	struct octeon_ciu_chip_data *cd;
    554
    555	cd = irq_data_get_irq_chip_data(data);
    556	mask = 1ull << (cd->bit);
    557
    558	if (cd->line == 0) {
    559		int index = cvmx_get_core_num() * 2;
    560		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
    561		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
    562	} else {
    563		int index = cvmx_get_core_num() * 2 + 1;
    564		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
    565		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
    566	}
    567}
    568
    569static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
    570{
    571	u64 mask;
    572	struct octeon_ciu_chip_data *cd;
    573
    574	cd = irq_data_get_irq_chip_data(data);
    575	mask = 1ull << (cd->bit);
    576
    577	if (cd->line == 0) {
    578		int index = cvmx_get_core_num() * 2;
    579		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
    580		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
    581	} else {
    582		int index = cvmx_get_core_num() * 2 + 1;
    583		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
    584		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
    585	}
    586}
    587
    588/*
    589 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
    590 */
    591static void octeon_irq_ciu_ack(struct irq_data *data)
    592{
    593	u64 mask;
    594	struct octeon_ciu_chip_data *cd;
    595
    596	cd = irq_data_get_irq_chip_data(data);
    597	mask = 1ull << (cd->bit);
    598
    599	if (cd->line == 0) {
    600		int index = cvmx_get_core_num() * 2;
    601		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
    602	} else {
    603		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
    604	}
    605}
    606
    607/*
    608 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
    609 * registers.
    610 */
    611static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
    612{
    613	int cpu;
    614	u64 mask;
    615	struct octeon_ciu_chip_data *cd;
    616
    617	cd = irq_data_get_irq_chip_data(data);
    618	mask = 1ull << (cd->bit);
    619
    620	if (cd->line == 0) {
    621		for_each_online_cpu(cpu) {
    622			int index = octeon_coreid_for_cpu(cpu) * 2;
    623			clear_bit(cd->bit,
    624				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
    625			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
    626		}
    627	} else {
    628		for_each_online_cpu(cpu) {
    629			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
    630			clear_bit(cd->bit,
    631				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
    632			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
    633		}
    634	}
    635}
    636
    637/*
    638 * Enable the irq on the all cores for chips that have the EN*_W1{S,C}
    639 * registers.
    640 */
    641static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
    642{
    643	int cpu;
    644	u64 mask;
    645	struct octeon_ciu_chip_data *cd;
    646
    647	cd = irq_data_get_irq_chip_data(data);
    648	mask = 1ull << (cd->bit);
    649
    650	if (cd->line == 0) {
    651		for_each_online_cpu(cpu) {
    652			int index = octeon_coreid_for_cpu(cpu) * 2;
    653			set_bit(cd->bit,
    654				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
    655			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
    656		}
    657	} else {
    658		for_each_online_cpu(cpu) {
    659			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
    660			set_bit(cd->bit,
    661				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
    662			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
    663		}
    664	}
    665}
    666
    667static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
    668{
    669	irqd_set_trigger_type(data, t);
    670
    671	if (t & IRQ_TYPE_EDGE_BOTH)
    672		irq_set_handler_locked(data, handle_edge_irq);
    673	else
    674		irq_set_handler_locked(data, handle_level_irq);
    675
    676	return IRQ_SET_MASK_OK;
    677}
    678
    679static void octeon_irq_gpio_setup(struct irq_data *data)
    680{
    681	union cvmx_gpio_bit_cfgx cfg;
    682	struct octeon_ciu_chip_data *cd;
    683	u32 t = irqd_get_trigger_type(data);
    684
    685	cd = irq_data_get_irq_chip_data(data);
    686
    687	cfg.u64 = 0;
    688	cfg.s.int_en = 1;
    689	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
    690	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;
    691
    692	/* 140 ns glitch filter */
    693	cfg.s.fil_cnt = 7;
    694	cfg.s.fil_sel = 3;
    695
    696	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
    697}
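
/*
 * Worked example (hypothetical helper): IRQ_TYPE_EDGE_FALLING yields
 * int_type = 1 (edge) and rx_xor = 1 (invert) in octeon_irq_gpio_setup()
 * above, so the CIU sees a rising edge when the pin falls; the fixed
 * equivalent would be:
 */
static inline u64 example_gpio_cfg_falling_edge(void)
{
	union cvmx_gpio_bit_cfgx cfg;

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = 1;	/* edge triggered */
	cfg.s.rx_xor = 1;	/* inverted, so the falling edge fires */
	return cfg.u64;
}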
    698
    699static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
    700{
    701	octeon_irq_gpio_setup(data);
    702	octeon_irq_ciu_enable_v2(data);
    703}
    704
    705static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
    706{
    707	octeon_irq_gpio_setup(data);
    708	octeon_irq_ciu_enable(data);
    709}
    710
    711static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
    712{
    713	irqd_set_trigger_type(data, t);
    714	octeon_irq_gpio_setup(data);
    715
    716	if (t & IRQ_TYPE_EDGE_BOTH)
    717		irq_set_handler_locked(data, handle_edge_irq);
    718	else
    719		irq_set_handler_locked(data, handle_level_irq);
    720
    721	return IRQ_SET_MASK_OK;
    722}
    723
    724static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
    725{
    726	struct octeon_ciu_chip_data *cd;
    727
    728	cd = irq_data_get_irq_chip_data(data);
    729	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
    730
    731	octeon_irq_ciu_disable_all_v2(data);
    732}
    733
    734static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
    735{
    736	struct octeon_ciu_chip_data *cd;
    737
    738	cd = irq_data_get_irq_chip_data(data);
    739	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
    740
    741	octeon_irq_ciu_disable_all(data);
    742}
    743
    744static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
    745{
    746	struct octeon_ciu_chip_data *cd;
    747	u64 mask;
    748
    749	cd = irq_data_get_irq_chip_data(data);
    750	mask = 1ull << (cd->gpio_line);
    751
    752	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
    753}
    754
    755#ifdef CONFIG_SMP
    756
    757static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
    758{
    759	int cpu = smp_processor_id();
    760	cpumask_t new_affinity;
    761	struct cpumask *mask = irq_data_get_affinity_mask(data);
    762
    763	if (!cpumask_test_cpu(cpu, mask))
    764		return;
    765
    766	if (cpumask_weight(mask) > 1) {
    767		/*
    768		 * It has multi-CPU affinity; just remove this CPU
    769		 * from the affinity set.
    770		 */
    771		cpumask_copy(&new_affinity, mask);
    772		cpumask_clear_cpu(cpu, &new_affinity);
    773	} else {
    774		/* Otherwise, put it on lowest numbered online CPU. */
    775		cpumask_clear(&new_affinity);
    776		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
    777	}
    778	irq_set_affinity_locked(data, &new_affinity, false);
    779}
    780
    781static int octeon_irq_ciu_set_affinity(struct irq_data *data,
    782				       const struct cpumask *dest, bool force)
    783{
    784	int cpu;
    785	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
    786	unsigned long flags;
    787	struct octeon_ciu_chip_data *cd;
    788	unsigned long *pen;
    789	raw_spinlock_t *lock;
    790
    791	cd = irq_data_get_irq_chip_data(data);
    792
    793	/*
    794	 * For non-v2 CIU, we will allow only single CPU affinity.
    795	 * This removes the need to do locking in the .ack/.eoi
    796	 * functions.
    797	 */
    798	if (cpumask_weight(dest) != 1)
    799		return -EINVAL;
    800
    801	if (!enable_one)
    802		return 0;
    803
    804
    805	for_each_online_cpu(cpu) {
    806		int coreid = octeon_coreid_for_cpu(cpu);
    807
    808		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
    809		raw_spin_lock_irqsave(lock, flags);
    810
    811		if (cd->line == 0)
    812			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
    813		else
    814			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
    815
    816		if (cpumask_test_cpu(cpu, dest) && enable_one) {
    817			enable_one = false;
    818			__set_bit(cd->bit, pen);
    819		} else {
    820			__clear_bit(cd->bit, pen);
    821		}
    822		/*
    823		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
    824		 * enabling the irq.
    825		 */
    826		wmb();
    827
    828		if (cd->line == 0)
    829			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
    830		else
    831			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
    832
    833		raw_spin_unlock_irqrestore(lock, flags);
    834	}
    835	return 0;
    836}
    837
    838/*
    839 * Set affinity for the irq for chips that have the EN*_W1{S,C}
    840 * registers.
    841 */
    842static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
    843					  const struct cpumask *dest,
    844					  bool force)
    845{
    846	int cpu;
    847	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
    848	u64 mask;
    849	struct octeon_ciu_chip_data *cd;
    850
    851	if (!enable_one)
    852		return 0;
    853
    854	cd = irq_data_get_irq_chip_data(data);
    855	mask = 1ull << cd->bit;
    856
    857	if (cd->line == 0) {
    858		for_each_online_cpu(cpu) {
    859			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
    860			int index = octeon_coreid_for_cpu(cpu) * 2;
    861			if (cpumask_test_cpu(cpu, dest) && enable_one) {
    862				enable_one = false;
    863				set_bit(cd->bit, pen);
    864				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
    865			} else {
    866				clear_bit(cd->bit, pen);
    867				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
    868			}
    869		}
    870	} else {
    871		for_each_online_cpu(cpu) {
    872			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
    873			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
    874			if (cpumask_test_cpu(cpu, dest) && enable_one) {
    875				enable_one = false;
    876				set_bit(cd->bit, pen);
    877				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
    878			} else {
    879				clear_bit(cd->bit, pen);
    880				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
    881			}
    882		}
    883	}
    884	return 0;
    885}
    886
    887static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
    888					    const struct cpumask *dest,
    889					    bool force)
    890{
    891	int cpu;
    892	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
    893	u64 mask;
    894	struct octeon_ciu_chip_data *cd;
    895
    896	if (!enable_one)
    897		return 0;
    898
    899	cd = irq_data_get_irq_chip_data(data);
    900	mask = 1ull << cd->bit;
    901
    902	for_each_online_cpu(cpu) {
    903		int index = octeon_coreid_for_cpu(cpu);
    904
    905		if (cpumask_test_cpu(cpu, dest) && enable_one) {
    906			enable_one = false;
    907			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
    908		} else {
    909			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
    910		}
    911	}
    912	return 0;
    913}
    914#endif
    915
    916static unsigned int edge_startup(struct irq_data *data)
    917{
    918	/* ack any pending edge-irq at startup, so there is
    919	 * an _edge_ to fire on when the event reappears.
    920	 */
    921	data->chip->irq_ack(data);
    922	data->chip->irq_enable(data);
    923	return 0;
    924}
    925
    926/*
    927 * Newer octeon chips have support for lockless CIU operation.
    928 */
    929static struct irq_chip octeon_irq_chip_ciu_v2 = {
    930	.name = "CIU",
    931	.irq_enable = octeon_irq_ciu_enable_v2,
    932	.irq_disable = octeon_irq_ciu_disable_all_v2,
    933	.irq_mask = octeon_irq_ciu_disable_local_v2,
    934	.irq_unmask = octeon_irq_ciu_enable_v2,
    935#ifdef CONFIG_SMP
    936	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
    937	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
    938#endif
    939};
    940
    941static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
    942	.name = "CIU",
    943	.irq_enable = octeon_irq_ciu_enable_v2,
    944	.irq_disable = octeon_irq_ciu_disable_all_v2,
    945	.irq_ack = octeon_irq_ciu_ack,
    946	.irq_mask = octeon_irq_ciu_disable_local_v2,
    947	.irq_unmask = octeon_irq_ciu_enable_v2,
    948#ifdef CONFIG_SMP
    949	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
    950	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
    951#endif
    952};
    953
    954/*
    955 * Newer octeon chips have support for lockless CIU operation.
    956 */
    957static struct irq_chip octeon_irq_chip_ciu_sum2 = {
    958	.name = "CIU",
    959	.irq_enable = octeon_irq_ciu_enable_sum2,
    960	.irq_disable = octeon_irq_ciu_disable_all_sum2,
    961	.irq_mask = octeon_irq_ciu_disable_local_sum2,
    962	.irq_unmask = octeon_irq_ciu_enable_sum2,
    963#ifdef CONFIG_SMP
    964	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
    965	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
    966#endif
    967};
    968
    969static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
    970	.name = "CIU",
    971	.irq_enable = octeon_irq_ciu_enable_sum2,
    972	.irq_disable = octeon_irq_ciu_disable_all_sum2,
    973	.irq_ack = octeon_irq_ciu_ack_sum2,
    974	.irq_mask = octeon_irq_ciu_disable_local_sum2,
    975	.irq_unmask = octeon_irq_ciu_enable_sum2,
    976#ifdef CONFIG_SMP
    977	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
    978	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
    979#endif
    980};
    981
    982static struct irq_chip octeon_irq_chip_ciu = {
    983	.name = "CIU",
    984	.irq_enable = octeon_irq_ciu_enable,
    985	.irq_disable = octeon_irq_ciu_disable_all,
    986	.irq_mask = octeon_irq_ciu_disable_local,
    987	.irq_unmask = octeon_irq_ciu_enable,
    988#ifdef CONFIG_SMP
    989	.irq_set_affinity = octeon_irq_ciu_set_affinity,
    990	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
    991#endif
    992};
    993
    994static struct irq_chip octeon_irq_chip_ciu_edge = {
    995	.name = "CIU",
    996	.irq_enable = octeon_irq_ciu_enable,
    997	.irq_disable = octeon_irq_ciu_disable_all,
    998	.irq_ack = octeon_irq_ciu_ack,
    999	.irq_mask = octeon_irq_ciu_disable_local,
   1000	.irq_unmask = octeon_irq_ciu_enable,
   1001#ifdef CONFIG_SMP
   1002	.irq_set_affinity = octeon_irq_ciu_set_affinity,
   1003	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
   1004#endif
   1005};
   1006
   1007/* The mbox versions don't do any affinity or round-robin. */
   1008static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
   1009	.name = "CIU-M",
   1010	.irq_enable = octeon_irq_ciu_enable_all_v2,
   1011	.irq_disable = octeon_irq_ciu_disable_all_v2,
   1012	.irq_ack = octeon_irq_ciu_disable_local_v2,
   1013	.irq_eoi = octeon_irq_ciu_enable_local_v2,
   1014
   1015	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
   1016	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
   1017	.flags = IRQCHIP_ONOFFLINE_ENABLED,
   1018};
   1019
   1020static struct irq_chip octeon_irq_chip_ciu_mbox = {
   1021	.name = "CIU-M",
   1022	.irq_enable = octeon_irq_ciu_enable_all,
   1023	.irq_disable = octeon_irq_ciu_disable_all,
   1024	.irq_ack = octeon_irq_ciu_disable_local,
   1025	.irq_eoi = octeon_irq_ciu_enable_local,
   1026
   1027	.irq_cpu_online = octeon_irq_ciu_enable_local,
   1028	.irq_cpu_offline = octeon_irq_ciu_disable_local,
   1029	.flags = IRQCHIP_ONOFFLINE_ENABLED,
   1030};
   1031
   1032static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
   1033	.name = "CIU-GPIO",
   1034	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
   1035	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
   1036	.irq_ack = octeon_irq_ciu_gpio_ack,
   1037	.irq_mask = octeon_irq_ciu_disable_local_v2,
   1038	.irq_unmask = octeon_irq_ciu_enable_v2,
   1039	.irq_set_type = octeon_irq_ciu_gpio_set_type,
   1040#ifdef CONFIG_SMP
   1041	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
   1042	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
   1043#endif
   1044	.flags = IRQCHIP_SET_TYPE_MASKED,
   1045};
   1046
   1047static struct irq_chip octeon_irq_chip_ciu_gpio = {
   1048	.name = "CIU-GPIO",
   1049	.irq_enable = octeon_irq_ciu_enable_gpio,
   1050	.irq_disable = octeon_irq_ciu_disable_gpio,
   1051	.irq_mask = octeon_irq_ciu_disable_local,
   1052	.irq_unmask = octeon_irq_ciu_enable,
   1053	.irq_ack = octeon_irq_ciu_gpio_ack,
   1054	.irq_set_type = octeon_irq_ciu_gpio_set_type,
   1055#ifdef CONFIG_SMP
   1056	.irq_set_affinity = octeon_irq_ciu_set_affinity,
   1057	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
   1058#endif
   1059	.flags = IRQCHIP_SET_TYPE_MASKED,
   1060};
   1061
   1062/*
   1063 * Watchdog interrupts are special.  They are associated with a single
   1064 * core, so we hardwire the affinity to that core.
   1065 */
   1066static void octeon_irq_ciu_wd_enable(struct irq_data *data)
   1067{
   1068	unsigned long flags;
   1069	unsigned long *pen;
   1070	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
   1071	int cpu = octeon_cpu_for_coreid(coreid);
   1072	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
   1073
   1074	raw_spin_lock_irqsave(lock, flags);
   1075	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
   1076	__set_bit(coreid, pen);
   1077	/*
   1078	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
   1079	 * the irq.
   1080	 */
   1081	wmb();
   1082	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
   1083	raw_spin_unlock_irqrestore(lock, flags);
   1084}
   1085
   1086/*
   1087 * Watchdog interrupts are special.  They are associated with a single
   1088 * core, so we hardwire the affinity to that core.
   1089 */
   1090static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
   1091{
   1092	int coreid = data->irq - OCTEON_IRQ_WDOG0;
   1093	int cpu = octeon_cpu_for_coreid(coreid);
   1094
   1095	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
   1096	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
   1097}
   1098
   1099
   1100static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
   1101	.name = "CIU-W",
   1102	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
   1103	.irq_disable = octeon_irq_ciu_disable_all_v2,
   1104	.irq_mask = octeon_irq_ciu_disable_local_v2,
   1105	.irq_unmask = octeon_irq_ciu_enable_local_v2,
   1106};
   1107
   1108static struct irq_chip octeon_irq_chip_ciu_wd = {
   1109	.name = "CIU-W",
   1110	.irq_enable = octeon_irq_ciu_wd_enable,
   1111	.irq_disable = octeon_irq_ciu_disable_all,
   1112	.irq_mask = octeon_irq_ciu_disable_local,
   1113	.irq_unmask = octeon_irq_ciu_enable_local,
   1114};
   1115
   1116static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
   1117{
   1118	bool edge = false;
   1119
   1120	if (line == 0)
   1121		switch (bit) {
   1122		case 48 ... 49: /* GMX DRP */
   1123		case 50: /* IPD_DRP */
   1124		case 52 ... 55: /* Timers */
   1125		case 58: /* MPI */
   1126			edge = true;
   1127			break;
   1128		default:
   1129			break;
   1130		}
   1131	else /* line == 1 */
   1132		switch (bit) {
   1133		case 47: /* PTP */
   1134			edge = true;
   1135			break;
   1136		default:
   1137			break;
   1138		}
   1139	return edge;
   1140}
   1141
   1142struct octeon_irq_gpio_domain_data {
   1143	unsigned int base_hwirq;
   1144};
   1145
   1146static int octeon_irq_gpio_xlat(struct irq_domain *d,
   1147				struct device_node *node,
   1148				const u32 *intspec,
   1149				unsigned int intsize,
   1150				unsigned long *out_hwirq,
   1151				unsigned int *out_type)
   1152{
   1153	unsigned int type;
   1154	unsigned int pin;
   1155	unsigned int trigger;
   1156
   1157	if (irq_domain_get_of_node(d) != node)
   1158		return -EINVAL;
   1159
   1160	if (intsize < 2)
   1161		return -EINVAL;
   1162
   1163	pin = intspec[0];
   1164	if (pin >= 16)
   1165		return -EINVAL;
   1166
   1167	trigger = intspec[1];
   1168
   1169	switch (trigger) {
   1170	case 1:
   1171		type = IRQ_TYPE_EDGE_RISING;
   1172		break;
   1173	case 2:
   1174		type = IRQ_TYPE_EDGE_FALLING;
   1175		break;
   1176	case 4:
   1177		type = IRQ_TYPE_LEVEL_HIGH;
   1178		break;
   1179	case 8:
   1180		type = IRQ_TYPE_LEVEL_LOW;
   1181		break;
   1182	default:
   1183		pr_err("Error: (%pOFn) Invalid irq trigger specification: %x\n",
   1184		       node,
   1185		       trigger);
   1186		type = IRQ_TYPE_LEVEL_LOW;
   1187		break;
   1188	}
   1189	*out_type = type;
   1190	*out_hwirq = pin;
   1191
   1192	return 0;
   1193}
   1194
   1195static int octeon_irq_ciu_xlat(struct irq_domain *d,
   1196			       struct device_node *node,
   1197			       const u32 *intspec,
   1198			       unsigned int intsize,
   1199			       unsigned long *out_hwirq,
   1200			       unsigned int *out_type)
   1201{
   1202	unsigned int ciu, bit;
   1203	struct octeon_irq_ciu_domain_data *dd = d->host_data;
   1204
   1205	ciu = intspec[0];
   1206	bit = intspec[1];
   1207
   1208	if (ciu >= dd->num_sum || bit > 63)
   1209		return -EINVAL;
   1210
   1211	*out_hwirq = (ciu << 6) | bit;
   1212	*out_type = 0;
   1213
   1214	return 0;
   1215}
   1216
   1217static struct irq_chip *octeon_irq_ciu_chip;
   1218static struct irq_chip *octeon_irq_ciu_chip_edge;
   1219static struct irq_chip *octeon_irq_gpio_chip;
   1220
   1221static int octeon_irq_ciu_map(struct irq_domain *d,
   1222			      unsigned int virq, irq_hw_number_t hw)
   1223{
   1224	int rv;
   1225	unsigned int line = hw >> 6;
   1226	unsigned int bit = hw & 63;
   1227	struct octeon_irq_ciu_domain_data *dd = d->host_data;
   1228
   1229	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
   1230		return -EINVAL;
   1231
   1232	if (line == 2) {
   1233		if (octeon_irq_ciu_is_edge(line, bit))
   1234			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
   1235				&octeon_irq_chip_ciu_sum2_edge,
   1236				handle_edge_irq);
   1237		else
   1238			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
   1239				&octeon_irq_chip_ciu_sum2,
   1240				handle_level_irq);
   1241	} else {
   1242		if (octeon_irq_ciu_is_edge(line, bit))
   1243			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
   1244				octeon_irq_ciu_chip_edge,
   1245				handle_edge_irq);
   1246		else
   1247			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
   1248				octeon_irq_ciu_chip,
   1249				handle_level_irq);
   1250	}
   1251	return rv;
   1252}
   1253
   1254static int octeon_irq_gpio_map(struct irq_domain *d,
   1255			       unsigned int virq, irq_hw_number_t hw)
   1256{
   1257	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
   1258	unsigned int line, bit;
   1259	int r;
   1260
   1261	line = (hw + gpiod->base_hwirq) >> 6;
   1262	bit = (hw + gpiod->base_hwirq) & 63;
   1263	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
   1264		octeon_irq_ciu_to_irq[line][bit] != 0)
   1265		return -EINVAL;
   1266
   1267	/*
   1268	 * Default to handle_level_irq. If the DT contains a different
   1269	 * trigger type, it will call the irq_set_type callback and
   1270	 * the handler gets updated.
   1271	 */
   1272	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
   1273				       octeon_irq_gpio_chip, handle_level_irq);
   1274	return r;
   1275}
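
/*
 * Worked example with assumed values: if base_hwirq were 16, GPIO pin 3
 * would map to hwirq 19 in the ciu domain, i.e. CIU line 0, bit 19.
 * Hypothetical helper mirroring the arithmetic above:
 */
static inline void example_gpio_to_ciu(unsigned int base_hwirq,
				       unsigned int pin,
				       unsigned int *line, unsigned int *bit)
{
	*line = (pin + base_hwirq) >> 6;
	*bit = (pin + base_hwirq) & 63;
}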
   1276
   1277static const struct irq_domain_ops octeon_irq_domain_ciu_ops = {
   1278	.map = octeon_irq_ciu_map,
   1279	.unmap = octeon_irq_free_cd,
   1280	.xlate = octeon_irq_ciu_xlat,
   1281};
   1282
   1283static const struct irq_domain_ops octeon_irq_domain_gpio_ops = {
   1284	.map = octeon_irq_gpio_map,
   1285	.unmap = octeon_irq_free_cd,
   1286	.xlate = octeon_irq_gpio_xlat,
   1287};
   1288
   1289static void octeon_irq_ip2_ciu(void)
   1290{
   1291	const unsigned long core_id = cvmx_get_core_num();
   1292	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
   1293
   1294	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
   1295	if (likely(ciu_sum)) {
   1296		int bit = fls64(ciu_sum) - 1;
   1297		int irq = octeon_irq_ciu_to_irq[0][bit];
   1298		if (likely(irq))
   1299			do_IRQ(irq);
   1300		else
   1301			spurious_interrupt();
   1302	} else {
   1303		spurious_interrupt();
   1304	}
   1305}
   1306
   1307static void octeon_irq_ip3_ciu(void)
   1308{
   1309	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
   1310
   1311	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
   1312	if (likely(ciu_sum)) {
   1313		int bit = fls64(ciu_sum) - 1;
   1314		int irq = octeon_irq_ciu_to_irq[1][bit];
   1315		if (likely(irq))
   1316			do_IRQ(irq);
   1317		else
   1318			spurious_interrupt();
   1319	} else {
   1320		spurious_interrupt();
   1321	}
   1322}
   1323
   1324static void octeon_irq_ip4_ciu(void)
   1325{
   1326	int coreid = cvmx_get_core_num();
   1327	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
   1328	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));
   1329
   1330	ciu_sum &= ciu_en;
   1331	if (likely(ciu_sum)) {
   1332		int bit = fls64(ciu_sum) - 1;
   1333		int irq = octeon_irq_ciu_to_irq[2][bit];
   1334
   1335		if (likely(irq))
   1336			do_IRQ(irq);
   1337		else
   1338			spurious_interrupt();
   1339	} else {
   1340		spurious_interrupt();
   1341	}
   1342}
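
/*
 * Worked example for the three dispatchers above: if enabled SUM bits 3
 * and 45 are both pending, fls64() returns 46, so bit 45 is serviced
 * first; higher-numbered CIU bits effectively get priority. Hypothetical
 * helper:
 */
static inline int example_highest_pending(u64 ciu_sum)
{
	return fls64(ciu_sum) - 1;	/* (1ull << 45) | (1ull << 3) -> 45 */
}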
   1343
   1344static bool octeon_irq_use_ip4;
   1345
   1346static void octeon_irq_local_enable_ip4(void *arg)
   1347{
   1348	set_c0_status(STATUSF_IP4);
   1349}
   1350
   1351static void octeon_irq_ip4_mask(void)
   1352{
   1353	clear_c0_status(STATUSF_IP4);
   1354	spurious_interrupt();
   1355}
   1356
   1357static void (*octeon_irq_ip2)(void);
   1358static void (*octeon_irq_ip3)(void);
   1359static void (*octeon_irq_ip4)(void);
   1360
   1361void (*octeon_irq_setup_secondary)(void);
   1362
   1363void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
   1364{
   1365	octeon_irq_ip4 = h;
   1366	octeon_irq_use_ip4 = true;
   1367	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
   1368}
   1369
   1370static void octeon_irq_percpu_enable(void)
   1371{
   1372	irq_cpu_online();
   1373}
   1374
   1375static void octeon_irq_init_ciu_percpu(void)
   1376{
   1377	int coreid = cvmx_get_core_num();
   1378
   1379
   1380	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
   1381	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
   1382	wmb();
   1383	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
   1384	/*
   1385	 * Disable All CIU Interrupts. The ones we need will be
   1386	 * enabled later.  Read the SUM register so we know the write
   1387	 * completed.
   1388	 */
   1389	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
   1390	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
   1391	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
   1392	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
   1393	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
   1394}
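
/*
 * Sketch of the register indexing assumed above (an assumption inferred
 * from this file, not a documented layout): each core owns two enable
 * sets, the even index feeding the IP2 line and the odd index feeding
 * IP3, hence the coreid * 2 and coreid * 2 + 1 indexes used throughout.
 */
static inline u64 example_ciu_en0_csr(int coreid, bool ip3)
{
	return CVMX_CIU_INTX_EN0(coreid * 2 + (ip3 ? 1 : 0));
}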
   1395
   1396static void octeon_irq_init_ciu2_percpu(void)
   1397{
   1398	u64 regx, ipx;
   1399	int coreid = cvmx_get_core_num();
   1400	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);
   1401
   1402	/*
   1403	 * Disable All CIU2 Interrupts. The ones we need will be
   1404	 * enabled later.  Read the SUM register so we know the write
   1405	 * completed.
   1406	 *
   1407	 * There are 9 registers and 3 IPX levels with strides 0x1000
   1408	 * and 0x200 respectively.  Use loops to clear them.
   1409	 */
   1410	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
   1411		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
   1412			cvmx_write_csr(base + regx + ipx, 0);
   1413	}
   1414
   1415	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
   1416}
   1417
   1418static void octeon_irq_setup_secondary_ciu(void)
   1419{
   1420	octeon_irq_init_ciu_percpu();
   1421	octeon_irq_percpu_enable();
   1422
   1423	/* Enable the CIU lines */
   1424	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
   1425	if (octeon_irq_use_ip4)
   1426		set_c0_status(STATUSF_IP4);
   1427	else
   1428		clear_c0_status(STATUSF_IP4);
   1429}
   1430
   1431static void octeon_irq_setup_secondary_ciu2(void)
   1432{
   1433	octeon_irq_init_ciu2_percpu();
   1434	octeon_irq_percpu_enable();
   1435
   1436	/* Enable the CIU lines */
   1437	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
   1438	if (octeon_irq_use_ip4)
   1439		set_c0_status(STATUSF_IP4);
   1440	else
   1441		clear_c0_status(STATUSF_IP4);
   1442}
   1443
   1444static int __init octeon_irq_init_ciu(
   1445	struct device_node *ciu_node, struct device_node *parent)
   1446{
   1447	int i, r;
   1448	struct irq_chip *chip;
   1449	struct irq_chip *chip_edge;
   1450	struct irq_chip *chip_mbox;
   1451	struct irq_chip *chip_wd;
   1452	struct irq_domain *ciu_domain = NULL;
   1453	struct octeon_irq_ciu_domain_data *dd;
   1454
   1455	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
   1456	if (!dd)
   1457		return -ENOMEM;
   1458
   1459	octeon_irq_init_ciu_percpu();
   1460	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
   1461
   1462	octeon_irq_ip2 = octeon_irq_ip2_ciu;
   1463	octeon_irq_ip3 = octeon_irq_ip3_ciu;
   1464	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
   1465		&& !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
   1466		octeon_irq_ip4 =  octeon_irq_ip4_ciu;
   1467		dd->num_sum = 3;
   1468		octeon_irq_use_ip4 = true;
   1469	} else {
   1470		octeon_irq_ip4 = octeon_irq_ip4_mask;
   1471		dd->num_sum = 2;
   1472		octeon_irq_use_ip4 = false;
   1473	}
   1474	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
   1475	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
   1476	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
   1477	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
   1478		chip = &octeon_irq_chip_ciu_v2;
   1479		chip_edge = &octeon_irq_chip_ciu_v2_edge;
   1480		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
   1481		chip_wd = &octeon_irq_chip_ciu_wd_v2;
   1482		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
   1483	} else {
   1484		chip = &octeon_irq_chip_ciu;
   1485		chip_edge = &octeon_irq_chip_ciu_edge;
   1486		chip_mbox = &octeon_irq_chip_ciu_mbox;
   1487		chip_wd = &octeon_irq_chip_ciu_wd;
   1488		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
   1489	}
   1490	octeon_irq_ciu_chip = chip;
   1491	octeon_irq_ciu_chip_edge = chip_edge;
   1492
   1493	/* MIPS internal */
   1494	octeon_irq_init_core();
   1495
   1496	ciu_domain = irq_domain_add_tree(
   1497		ciu_node, &octeon_irq_domain_ciu_ops, dd);
   1498	irq_set_default_host(ciu_domain);
   1499
   1500	/* CIU_0 */
   1501	for (i = 0; i < 16; i++) {
   1502		r = octeon_irq_force_ciu_mapping(
   1503			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
   1504		if (r)
   1505			goto err;
   1506	}
   1507
   1508	r = irq_alloc_desc_at(OCTEON_IRQ_MBOX0, -1);
   1509	if (r < 0) {
   1510		pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX0");
   1511		goto err;
   1512	}
   1513	r = octeon_irq_set_ciu_mapping(
   1514		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
   1515	if (r)
   1516		goto err;
   1517	r = irq_alloc_desc_at(OCTEON_IRQ_MBOX1, -1);
   1518	if (r < 0) {
   1519		pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX1");
   1520		goto err;
   1521	}
   1522	r = octeon_irq_set_ciu_mapping(
   1523		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
   1524	if (r)
   1525		goto err;
   1526
   1527	for (i = 0; i < 4; i++) {
   1528		r = octeon_irq_force_ciu_mapping(
   1529			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
   1530		if (r)
   1531			goto err;
   1532	}
   1533	for (i = 0; i < 4; i++) {
   1534		r = octeon_irq_force_ciu_mapping(
   1535			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
   1536		if (r)
   1537			goto err;
   1538	}
   1539
   1540	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
   1541	if (r)
   1542		goto err;
   1543
   1544	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
   1545	if (r)
   1546		goto err;
   1547
   1548	for (i = 0; i < 4; i++) {
   1549		r = octeon_irq_force_ciu_mapping(
   1550			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
   1551		if (r)
   1552			goto err;
   1553	}
   1554
   1555	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
   1556	if (r)
   1557		goto err;
   1558
   1559	r = irq_alloc_descs(OCTEON_IRQ_WDOG0, OCTEON_IRQ_WDOG0, 16, -1);
   1560	if (r < 0) {
   1561		pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_WDOGx");
   1562		goto err;
   1563	}
   1564	/* CIU_1 */
   1565	for (i = 0; i < 16; i++) {
   1566		r = octeon_irq_set_ciu_mapping(
   1567			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
   1568			handle_level_irq);
   1569		if (r)
   1570			goto err;
   1571	}
   1572
   1573	/* Enable the CIU lines */
   1574	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
   1575	if (octeon_irq_use_ip4)
   1576		set_c0_status(STATUSF_IP4);
   1577	else
   1578		clear_c0_status(STATUSF_IP4);
   1579
   1580	return 0;
   1581err:
   1582	return r;
   1583}
   1584
   1585static int __init octeon_irq_init_gpio(
   1586	struct device_node *gpio_node, struct device_node *parent)
   1587{
   1588	struct octeon_irq_gpio_domain_data *gpiod;
   1589	u32 interrupt_cells;
   1590	unsigned int base_hwirq;
   1591	int r;
   1592
   1593	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
   1594	if (r)
   1595		return r;
   1596
   1597	if (interrupt_cells == 1) {
   1598		u32 v;
   1599
   1600		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
   1601		if (r) {
   1602			pr_warn("No \"interrupts\" property.\n");
   1603			return r;
   1604		}
   1605		base_hwirq = v;
   1606	} else if (interrupt_cells == 2) {
   1607		u32 v0, v1;
   1608
   1609		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
   1610		if (r) {
   1611			pr_warn("No \"interrupts\" property.\n");
   1612			return r;
   1613		}
   1614		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
   1615		if (r) {
   1616			pr_warn("No \"interrupts\" property.\n");
   1617			return r;
   1618		}
   1619		base_hwirq = (v0 << 6) | v1;
   1620	} else {
   1621		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
   1622			interrupt_cells);
   1623		return -EINVAL;
   1624	}
   1625
   1626	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
   1627	if (gpiod) {
   1628		/* gpio domain host_data holds the base hwirq number. */
   1629		gpiod->base_hwirq = base_hwirq;
   1630		irq_domain_add_linear(
   1631			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
   1632	} else {
   1633		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
   1634		return -ENOMEM;
   1635	}
   1636
   1637	/*
   1638	 * Clear the OF_POPULATED flag that was set by of_irq_init()
   1639	 * so that all GPIO devices will be probed.
   1640	 */
   1641	of_node_clear_flag(gpio_node, OF_POPULATED);
   1642
   1643	return 0;
   1644}
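
/*
 * Worked example with hypothetical DT values: with #interrupt-cells = <2>
 * and interrupts = <0 16>, base_hwirq becomes (0 << 6) | 16 = 16, the
 * same (line << 6) | bit packing the ciu domain uses.
 */
static inline unsigned int example_gpio_base_hwirq(u32 v0, u32 v1)
{
	return (v0 << 6) | v1;	/* <0 16> -> CIU line 0, bit 16 */
}
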
   1645/*
   1646 * Watchdog interrupts are special.  They are associated with a single
   1647 * core, so we hardwire the affinity to that core.
   1648 */
   1649static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
   1650{
   1651	u64 mask;
   1652	u64 en_addr;
   1653	int coreid = data->irq - OCTEON_IRQ_WDOG0;
   1654	struct octeon_ciu_chip_data *cd;
   1655
   1656	cd = irq_data_get_irq_chip_data(data);
   1657	mask = 1ull << (cd->bit);
   1658
   1659	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
   1660		(0x1000ull * cd->line);
   1661	cvmx_write_csr(en_addr, mask);
   1662
   1663}
   1664
   1665static void octeon_irq_ciu2_enable(struct irq_data *data)
   1666{
   1667	u64 mask;
   1668	u64 en_addr;
   1669	int cpu = next_cpu_for_irq(data);
   1670	int coreid = octeon_coreid_for_cpu(cpu);
   1671	struct octeon_ciu_chip_data *cd;
   1672
   1673	cd = irq_data_get_irq_chip_data(data);
   1674	mask = 1ull << (cd->bit);
   1675
   1676	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
   1677		(0x1000ull * cd->line);
   1678	cvmx_write_csr(en_addr, mask);
   1679}
   1680
   1681static void octeon_irq_ciu2_enable_local(struct irq_data *data)
   1682{
   1683	u64 mask;
   1684	u64 en_addr;
   1685	int coreid = cvmx_get_core_num();
   1686	struct octeon_ciu_chip_data *cd;
   1687
   1688	cd = irq_data_get_irq_chip_data(data);
   1689	mask = 1ull << (cd->bit);
   1690
   1691	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
   1692		(0x1000ull * cd->line);
   1693	cvmx_write_csr(en_addr, mask);
   1694
   1695}
   1696
   1697static void octeon_irq_ciu2_disable_local(struct irq_data *data)
   1698{
   1699	u64 mask;
   1700	u64 en_addr;
   1701	int coreid = cvmx_get_core_num();
   1702	struct octeon_ciu_chip_data *cd;
   1703
   1704	cd = irq_data_get_irq_chip_data(data);
   1705	mask = 1ull << (cd->bit);
   1706
   1707	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
   1708		(0x1000ull * cd->line);
   1709	cvmx_write_csr(en_addr, mask);
   1710
   1711}
   1712
   1713static void octeon_irq_ciu2_ack(struct irq_data *data)
   1714{
   1715	u64 mask;
   1716	u64 en_addr;
   1717	int coreid = cvmx_get_core_num();
   1718	struct octeon_ciu_chip_data *cd;
   1719
   1720	cd = irq_data_get_irq_chip_data(data);
   1721	mask = 1ull << (cd->bit);
   1722
   1723	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
   1724	cvmx_write_csr(en_addr, mask);
   1725
   1726}
   1727
   1728static void octeon_irq_ciu2_disable_all(struct irq_data *data)
   1729{
   1730	int cpu;
   1731	u64 mask;
   1732	struct octeon_ciu_chip_data *cd;
   1733
   1734	cd = irq_data_get_irq_chip_data(data);
   1735	mask = 1ull << (cd->bit);
   1736
   1737	for_each_online_cpu(cpu) {
   1738		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
   1739			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
   1740		cvmx_write_csr(en_addr, mask);
   1741	}
   1742}
   1743
   1744static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
   1745{
   1746	int cpu;
   1747	u64 mask;
   1748
   1749	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
   1750
   1751	for_each_online_cpu(cpu) {
   1752		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
   1753			octeon_coreid_for_cpu(cpu));
   1754		cvmx_write_csr(en_addr, mask);
   1755	}
   1756}
   1757
   1758static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
   1759{
   1760	int cpu;
   1761	u64 mask;
   1762
   1763	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
   1764
   1765	for_each_online_cpu(cpu) {
   1766		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
   1767			octeon_coreid_for_cpu(cpu));
   1768		cvmx_write_csr(en_addr, mask);
   1769	}
   1770}
   1771
   1772static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
   1773{
   1774	u64 mask;
   1775	u64 en_addr;
   1776	int coreid = cvmx_get_core_num();
   1777
   1778	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
   1779	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
   1780	cvmx_write_csr(en_addr, mask);
   1781}
   1782
   1783static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
   1784{
   1785	u64 mask;
   1786	u64 en_addr;
   1787	int coreid = cvmx_get_core_num();
   1788
   1789	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
   1790	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
   1791	cvmx_write_csr(en_addr, mask);
   1792}
   1793
   1794#ifdef CONFIG_SMP
   1795static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
   1796					const struct cpumask *dest, bool force)
   1797{
   1798	int cpu;
   1799	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
   1800	u64 mask;
   1801	struct octeon_ciu_chip_data *cd;
   1802
   1803	if (!enable_one)
   1804		return 0;
   1805
   1806	cd = irq_data_get_irq_chip_data(data);
   1807	mask = 1ull << cd->bit;
   1808
   1809	for_each_online_cpu(cpu) {
   1810		u64 en_addr;
   1811		if (cpumask_test_cpu(cpu, dest) && enable_one) {
   1812			enable_one = false;
   1813			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
   1814				octeon_coreid_for_cpu(cpu)) +
   1815				(0x1000ull * cd->line);
   1816		} else {
   1817			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
   1818				octeon_coreid_for_cpu(cpu)) +
   1819				(0x1000ull * cd->line);
   1820		}
   1821		cvmx_write_csr(en_addr, mask);
   1822	}
   1823
   1824	return 0;
   1825}
   1826#endif
   1827
   1828static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
   1829{
   1830	octeon_irq_gpio_setup(data);
   1831	octeon_irq_ciu2_enable(data);
   1832}
   1833
   1834static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
   1835{
   1836	struct octeon_ciu_chip_data *cd;
   1837
   1838	cd = irq_data_get_irq_chip_data(data);
   1839
   1840	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
   1841
   1842	octeon_irq_ciu2_disable_all(data);
   1843}
   1844
   1845static struct irq_chip octeon_irq_chip_ciu2 = {
   1846	.name = "CIU2-E",
   1847	.irq_enable = octeon_irq_ciu2_enable,
   1848	.irq_disable = octeon_irq_ciu2_disable_all,
   1849	.irq_mask = octeon_irq_ciu2_disable_local,
   1850	.irq_unmask = octeon_irq_ciu2_enable,
   1851#ifdef CONFIG_SMP
   1852	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
   1853	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
   1854#endif
   1855};
   1856
   1857static struct irq_chip octeon_irq_chip_ciu2_edge = {
   1858	.name = "CIU2-E",
   1859	.irq_enable = octeon_irq_ciu2_enable,
   1860	.irq_disable = octeon_irq_ciu2_disable_all,
   1861	.irq_ack = octeon_irq_ciu2_ack,
   1862	.irq_mask = octeon_irq_ciu2_disable_local,
   1863	.irq_unmask = octeon_irq_ciu2_enable,
   1864#ifdef CONFIG_SMP
   1865	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
   1866	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
   1867#endif
   1868};
   1869
   1870static struct irq_chip octeon_irq_chip_ciu2_mbox = {
   1871	.name = "CIU2-M",
   1872	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
   1873	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
   1874	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
   1875	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,
   1876
   1877	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
   1878	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
   1879	.flags = IRQCHIP_ONOFFLINE_ENABLED,
   1880};
   1881
   1882static struct irq_chip octeon_irq_chip_ciu2_wd = {
   1883	.name = "CIU2-W",
   1884	.irq_enable = octeon_irq_ciu2_wd_enable,
   1885	.irq_disable = octeon_irq_ciu2_disable_all,
   1886	.irq_mask = octeon_irq_ciu2_disable_local,
   1887	.irq_unmask = octeon_irq_ciu2_enable_local,
   1888};
   1889
   1890static struct irq_chip octeon_irq_chip_ciu2_gpio = {
   1891	.name = "CIU-GPIO",
   1892	.irq_enable = octeon_irq_ciu2_enable_gpio,
   1893	.irq_disable = octeon_irq_ciu2_disable_gpio,
   1894	.irq_ack = octeon_irq_ciu_gpio_ack,
   1895	.irq_mask = octeon_irq_ciu2_disable_local,
   1896	.irq_unmask = octeon_irq_ciu2_enable,
   1897	.irq_set_type = octeon_irq_ciu_gpio_set_type,
   1898#ifdef CONFIG_SMP
   1899	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
   1900	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
   1901#endif
   1902	.flags = IRQCHIP_SET_TYPE_MASKED,
   1903};
   1904
   1905static int octeon_irq_ciu2_xlat(struct irq_domain *d,
   1906				struct device_node *node,
   1907				const u32 *intspec,
   1908				unsigned int intsize,
   1909				unsigned long *out_hwirq,
   1910				unsigned int *out_type)
   1911{
   1912	unsigned int ciu, bit;
   1913
   1914	ciu = intspec[0];
   1915	bit = intspec[1];
   1916
   1917	*out_hwirq = (ciu << 6) | bit;
   1918	*out_type = 0;
   1919
   1920	return 0;
   1921}
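
/*
 * A CIU2 device tree specifier is <line bit>; xlat packs it into a
 * single hwirq with the 6-bit source index in the low bits, and
 * ciu2_map unpacks it the same way.  A worked example of the round
 * trip (illustrative values, not taken from a real device tree):
 */
#if 0
static void example_ciu2_hwirq_roundtrip(void)
{
	unsigned long hwirq = (3ul << 6) | 8;	/* line 3, bit 8 => 0xc8 */
	unsigned int line = hwirq >> 6;		/* 3, as in ciu2_map */
	unsigned int bit = hwirq & 63;		/* 8 */
}
#endif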
   1922
   1923static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
   1924{
   1925	bool edge = false;
   1926
   1927	if (line == 3) /* MIO */
   1928		switch (bit) {
   1929		case 2:	 /* IPD_DRP */
   1930		case 8 ... 11: /* Timers */
   1931		case 48: /* PTP */
   1932			edge = true;
   1933			break;
   1934		default:
   1935			break;
   1936		}
   1937	else if (line == 6) /* PKT */
   1938		switch (bit) {
   1939		case 52 ... 53: /* ILK_DRP */
   1940		case 8 ... 12:	/* GMX_DRP */
   1941			edge = true;
   1942			break;
   1943		default:
   1944			break;
   1945		}
   1946	return edge;
   1947}
   1948
   1949static int octeon_irq_ciu2_map(struct irq_domain *d,
   1950			       unsigned int virq, irq_hw_number_t hw)
   1951{
   1952	unsigned int line = hw >> 6;
   1953	unsigned int bit = hw & 63;
   1954
    1955	/*
    1956	 * Don't map the irq if it is reserved for GPIO.
    1957	 * (Line 7 holds the GPIO lines.)
    1958	 */
   1959	if (line == 7)
   1960		return 0;
   1961
   1962	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
   1963		return -EINVAL;
   1964
   1965	if (octeon_irq_ciu2_is_edge(line, bit))
   1966		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
   1967					   &octeon_irq_chip_ciu2_edge,
   1968					   handle_edge_irq);
   1969	else
   1970		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
   1971					   &octeon_irq_chip_ciu2,
   1972					   handle_level_irq);
   1973
   1974	return 0;
   1975}
   1976
   1977static const struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
   1978	.map = octeon_irq_ciu2_map,
   1979	.unmap = octeon_irq_free_cd,
   1980	.xlate = octeon_irq_ciu2_xlat,
   1981};
   1982
   1983static void octeon_irq_ciu2(void)
   1984{
   1985	int line;
   1986	int bit;
   1987	int irq;
   1988	u64 src_reg, src, sum;
   1989	const unsigned long core_id = cvmx_get_core_num();
   1990
   1991	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;
   1992
   1993	if (unlikely(!sum))
   1994		goto spurious;
   1995
   1996	line = fls64(sum) - 1;
    1997	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000ull * line);
   1998	src = cvmx_read_csr(src_reg);
   1999
   2000	if (unlikely(!src))
   2001		goto spurious;
   2002
   2003	bit = fls64(src) - 1;
   2004	irq = octeon_irq_ciu_to_irq[line][bit];
   2005	if (unlikely(!irq))
   2006		goto spurious;
   2007
   2008	do_IRQ(irq);
   2009	goto out;
   2010
   2011spurious:
   2012	spurious_interrupt();
   2013out:
    2014	/* CN68XX pass 1.x has an erratum where accessing the ACK
    2015	 * registers can stop interrupts from propagating. */
   2016	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
   2017		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
   2018	else
   2019		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
   2020	return;
   2021}
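
/*
 * The IP2 handler above is a two-level decode: the per-core SUM
 * register has one bit per CIU2 line, and the per-line SRC register
 * has one bit per source; fls64() - 1 turns each mask into the
 * highest-numbered pending index.  A minimal model of that decode over
 * plain values (example_* is a hypothetical name):
 */
#if 0
static int example_ciu2_decode(u64 sum, u64 src)
{
	int line, bit;

	if (!sum || !src)
		return -1;		/* spurious */
	line = fls64(sum) - 1;		/* highest pending line */
	bit = fls64(src) - 1;		/* highest pending source in it */
	return octeon_irq_ciu_to_irq[line][bit];
}
#endif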
   2022
   2023static void octeon_irq_ciu2_mbox(void)
   2024{
   2025	int line;
   2026
   2027	const unsigned long core_id = cvmx_get_core_num();
   2028	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;
   2029
   2030	if (unlikely(!sum))
   2031		goto spurious;
   2032
   2033	line = fls64(sum) - 1;
   2034
   2035	do_IRQ(OCTEON_IRQ_MBOX0 + line);
   2036	goto out;
   2037
   2038spurious:
   2039	spurious_interrupt();
   2040out:
    2041	/* CN68XX pass 1.x has an erratum where accessing the ACK
    2042	 * registers can stop interrupts from propagating. */
   2043	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
   2044		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
   2045	else
   2046		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
   2047	return;
   2048}
   2049
   2050static int __init octeon_irq_init_ciu2(
   2051	struct device_node *ciu_node, struct device_node *parent)
   2052{
   2053	unsigned int i, r;
   2054	struct irq_domain *ciu_domain = NULL;
   2055
   2056	octeon_irq_init_ciu2_percpu();
   2057	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
   2058
   2059	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
   2060	octeon_irq_ip2 = octeon_irq_ciu2;
   2061	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
   2062	octeon_irq_ip4 = octeon_irq_ip4_mask;
   2063
   2064	/* Mips internal */
   2065	octeon_irq_init_core();
   2066
   2067	ciu_domain = irq_domain_add_tree(
   2068		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
   2069	irq_set_default_host(ciu_domain);
   2070
    2071	/* CIU2 */
   2072	for (i = 0; i < 64; i++) {
   2073		r = octeon_irq_force_ciu_mapping(
   2074			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
   2075		if (r)
   2076			goto err;
   2077	}
   2078
   2079	for (i = 0; i < 32; i++) {
   2080		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
   2081			&octeon_irq_chip_ciu2_wd, handle_level_irq);
   2082		if (r)
   2083			goto err;
   2084	}
   2085
   2086	for (i = 0; i < 4; i++) {
   2087		r = octeon_irq_force_ciu_mapping(
   2088			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
   2089		if (r)
   2090			goto err;
   2091	}
   2092
   2093	for (i = 0; i < 4; i++) {
   2094		r = octeon_irq_force_ciu_mapping(
   2095			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
   2096		if (r)
   2097			goto err;
   2098	}
   2099
   2100	for (i = 0; i < 4; i++) {
   2101		r = octeon_irq_force_ciu_mapping(
   2102			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
   2103		if (r)
   2104			goto err;
   2105	}
   2106
   2107	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
   2108	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
   2109	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
   2110	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
   2111
   2112	/* Enable the CIU lines */
   2113	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
   2114	clear_c0_status(STATUSF_IP4);
   2115	return 0;
   2116err:
   2117	return r;
   2118}
   2119
   2120struct octeon_irq_cib_host_data {
   2121	raw_spinlock_t lock;
   2122	u64 raw_reg;
   2123	u64 en_reg;
   2124	int max_bits;
   2125};
   2126
   2127struct octeon_irq_cib_chip_data {
   2128	struct octeon_irq_cib_host_data *host_data;
   2129	int bit;
   2130};
   2131
   2132static void octeon_irq_cib_enable(struct irq_data *data)
   2133{
   2134	unsigned long flags;
   2135	u64 en;
   2136	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
   2137	struct octeon_irq_cib_host_data *host_data = cd->host_data;
   2138
   2139	raw_spin_lock_irqsave(&host_data->lock, flags);
   2140	en = cvmx_read_csr(host_data->en_reg);
   2141	en |= 1ull << cd->bit;
   2142	cvmx_write_csr(host_data->en_reg, en);
   2143	raw_spin_unlock_irqrestore(&host_data->lock, flags);
   2144}
   2145
   2146static void octeon_irq_cib_disable(struct irq_data *data)
   2147{
   2148	unsigned long flags;
   2149	u64 en;
   2150	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
   2151	struct octeon_irq_cib_host_data *host_data = cd->host_data;
   2152
   2153	raw_spin_lock_irqsave(&host_data->lock, flags);
   2154	en = cvmx_read_csr(host_data->en_reg);
   2155	en &= ~(1ull << cd->bit);
   2156	cvmx_write_csr(host_data->en_reg, en);
   2157	raw_spin_unlock_irqrestore(&host_data->lock, flags);
   2158}
   2159
   2160static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
   2161{
   2162	irqd_set_trigger_type(data, t);
   2163	return IRQ_SET_MASK_OK;
   2164}
   2165
   2166static struct irq_chip octeon_irq_chip_cib = {
   2167	.name = "CIB",
   2168	.irq_enable = octeon_irq_cib_enable,
   2169	.irq_disable = octeon_irq_cib_disable,
   2170	.irq_mask = octeon_irq_cib_disable,
   2171	.irq_unmask = octeon_irq_cib_enable,
   2172	.irq_set_type = octeon_irq_cib_set_type,
   2173};
   2174
   2175static int octeon_irq_cib_xlat(struct irq_domain *d,
   2176				   struct device_node *node,
   2177				   const u32 *intspec,
   2178				   unsigned int intsize,
   2179				   unsigned long *out_hwirq,
   2180				   unsigned int *out_type)
   2181{
   2182	unsigned int type = 0;
   2183
   2184	if (intsize == 2)
   2185		type = intspec[1];
   2186
   2187	switch (type) {
   2188	case 0: /* unofficial value, but we might as well let it work. */
   2189	case 4: /* official value for level triggering. */
   2190		*out_type = IRQ_TYPE_LEVEL_HIGH;
   2191		break;
   2192	case 1: /* official value for edge triggering. */
   2193		*out_type = IRQ_TYPE_EDGE_RISING;
   2194		break;
   2195	default: /* Nothing else is acceptable. */
   2196		return -EINVAL;
   2197	}
   2198
   2199	*out_hwirq = intspec[0];
   2200
   2201	return 0;
   2202}
   2203
   2204static int octeon_irq_cib_map(struct irq_domain *d,
   2205			      unsigned int virq, irq_hw_number_t hw)
   2206{
   2207	struct octeon_irq_cib_host_data *host_data = d->host_data;
   2208	struct octeon_irq_cib_chip_data *cd;
   2209
   2210	if (hw >= host_data->max_bits) {
   2211		pr_err("ERROR: %s mapping %u is too big!\n",
   2212		       irq_domain_get_of_node(d)->name, (unsigned)hw);
   2213		return -EINVAL;
   2214	}
   2215
   2216	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
   2217	if (!cd)
   2218		return -ENOMEM;
   2219
   2220	cd->host_data = host_data;
   2221	cd->bit = hw;
   2222
   2223	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
   2224				 handle_simple_irq);
   2225	irq_set_chip_data(virq, cd);
   2226	return 0;
   2227}
   2228
   2229static const struct irq_domain_ops octeon_irq_domain_cib_ops = {
   2230	.map = octeon_irq_cib_map,
   2231	.unmap = octeon_irq_free_cd,
   2232	.xlate = octeon_irq_cib_xlat,
   2233};
   2234
   2235/* Chain to real handler. */
   2236static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
   2237{
   2238	u64 en;
   2239	u64 raw;
   2240	u64 bits;
   2241	int i;
   2242	int irq;
   2243	struct irq_domain *cib_domain = data;
   2244	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
   2245
   2246	en = cvmx_read_csr(host_data->en_reg);
   2247	raw = cvmx_read_csr(host_data->raw_reg);
   2248
   2249	bits = en & raw;
   2250
   2251	for (i = 0; i < host_data->max_bits; i++) {
   2252		if ((bits & 1ull << i) == 0)
   2253			continue;
   2254		irq = irq_find_mapping(cib_domain, i);
   2255		if (!irq) {
   2256			unsigned long flags;
   2257
   2258			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
   2259				i, host_data->raw_reg);
   2260			raw_spin_lock_irqsave(&host_data->lock, flags);
   2261			en = cvmx_read_csr(host_data->en_reg);
   2262			en &= ~(1ull << i);
   2263			cvmx_write_csr(host_data->en_reg, en);
   2264			cvmx_write_csr(host_data->raw_reg, 1ull << i);
   2265			raw_spin_unlock_irqrestore(&host_data->lock, flags);
   2266		} else {
   2267			struct irq_desc *desc = irq_to_desc(irq);
   2268			struct irq_data *irq_data = irq_desc_get_irq_data(desc);
   2269			/* If edge, acknowledge the bit we will be sending. */
   2270			if (irqd_get_trigger_type(irq_data) &
   2271				IRQ_TYPE_EDGE_BOTH)
   2272				cvmx_write_csr(host_data->raw_reg, 1ull << i);
   2273			generic_handle_irq_desc(desc);
   2274		}
   2275	}
   2276
   2277	return IRQ_HANDLED;
   2278}
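
/*
 * The CIB block is a simple secondary controller: one summary (RAW)
 * and one enable (EN) register, demultiplexed in software by the
 * chained handler above, so the dispatch core reduces to scanning
 * "en & raw".  A standalone sketch of that scan with the register
 * reads replaced by plain parameters (example_* is hypothetical):
 */
#if 0
static void example_cib_scan(u64 en, u64 raw, int max_bits,
			     void (*dispatch)(int bit))
{
	u64 bits = en & raw;
	int i;

	for (i = 0; i < max_bits; i++)
		if (bits & (1ull << i))
			dispatch(i); /* stands in for generic_handle_irq_desc() */
}
#endif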
   2279
   2280static int __init octeon_irq_init_cib(struct device_node *ciu_node,
   2281				      struct device_node *parent)
   2282{
   2283	const __be32 *addr;
   2284	u32 val;
   2285	struct octeon_irq_cib_host_data *host_data;
   2286	int parent_irq;
   2287	int r;
   2288	struct irq_domain *cib_domain;
   2289
   2290	parent_irq = irq_of_parse_and_map(ciu_node, 0);
   2291	if (!parent_irq) {
   2292		pr_err("ERROR: Couldn't acquire parent_irq for %pOFn\n",
   2293			ciu_node);
   2294		return -EINVAL;
   2295	}
   2296
   2297	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
   2298	if (!host_data)
   2299		return -ENOMEM;
   2300	raw_spin_lock_init(&host_data->lock);
   2301
   2302	addr = of_get_address(ciu_node, 0, NULL, NULL);
   2303	if (!addr) {
   2304		pr_err("ERROR: Couldn't acquire reg(0) %pOFn\n", ciu_node);
   2305		return -EINVAL;
   2306	}
   2307	host_data->raw_reg = (u64)phys_to_virt(
   2308		of_translate_address(ciu_node, addr));
   2309
   2310	addr = of_get_address(ciu_node, 1, NULL, NULL);
   2311	if (!addr) {
   2312		pr_err("ERROR: Couldn't acquire reg(1) %pOFn\n", ciu_node);
   2313		return -EINVAL;
   2314	}
   2315	host_data->en_reg = (u64)phys_to_virt(
   2316		of_translate_address(ciu_node, addr));
   2317
   2318	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
   2319	if (r) {
   2320		pr_err("ERROR: Couldn't read cavium,max-bits from %pOFn\n",
   2321			ciu_node);
   2322		return r;
   2323	}
   2324	host_data->max_bits = val;
   2325
   2326	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
   2327					   &octeon_irq_domain_cib_ops,
   2328					   host_data);
   2329	if (!cib_domain) {
   2330		pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
   2331		return -ENOMEM;
   2332	}
   2333
   2334	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
   2335	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
   2336
   2337	r = request_irq(parent_irq, octeon_irq_cib_handler,
   2338			IRQF_NO_THREAD, "cib", cib_domain);
   2339	if (r) {
   2340		pr_err("request_irq cib failed %d\n", r);
   2341		return r;
   2342	}
   2343	pr_info("CIB interrupt controller probed: %llx %d\n",
   2344		host_data->raw_reg, host_data->max_bits);
   2345	return 0;
   2346}
   2347
   2348int octeon_irq_ciu3_xlat(struct irq_domain *d,
   2349			 struct device_node *node,
   2350			 const u32 *intspec,
   2351			 unsigned int intsize,
   2352			 unsigned long *out_hwirq,
   2353			 unsigned int *out_type)
   2354{
   2355	struct octeon_ciu3_info *ciu3_info = d->host_data;
   2356	unsigned int hwirq, type, intsn_major;
   2357	union cvmx_ciu3_iscx_ctl isc;
   2358
   2359	if (intsize < 2)
   2360		return -EINVAL;
   2361	hwirq = intspec[0];
   2362	type = intspec[1];
   2363
   2364	if (hwirq >= (1 << 20))
   2365		return -EINVAL;
   2366
   2367	intsn_major = hwirq >> 12;
   2368	switch (intsn_major) {
   2369	case 0x04: /* Software handled separately. */
   2370		return -EINVAL;
   2371	default:
   2372		break;
   2373	}
   2374
    2375	isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
   2376	if (!isc.s.imp)
   2377		return -EINVAL;
   2378
   2379	switch (type) {
   2380	case 4: /* official value for level triggering. */
   2381		*out_type = IRQ_TYPE_LEVEL_HIGH;
   2382		break;
   2383	case 0: /* unofficial value, but we might as well let it work. */
   2384	case 1: /* official value for edge triggering. */
   2385		*out_type = IRQ_TYPE_EDGE_RISING;
   2386		break;
   2387	default: /* Nothing else is acceptable. */
   2388		return -EINVAL;
   2389	}
   2390
   2391	*out_hwirq = hwirq;
   2392
   2393	return 0;
   2394}
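
/*
 * CIU3 hwirqs are full 20-bit intsn values: bits 19..12 select the
 * major block (0x04 is the software/mbox block, which is why xlat
 * rejects it here), and xlat additionally probes ISC_CTL's "imp" bit
 * so only implemented sources map.  A small illustration of the intsn
 * split, using a hypothetical source number:
 */
#if 0
static void example_ciu3_intsn_split(void)
{
	unsigned int intsn = 0x08007;		/* hypothetical source */
	unsigned int major = intsn >> 12;	/* 0x08: selects the domain */
	unsigned int within = intsn & 0xfff;	/* 0x007: source in block */
}
#endif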
   2395
   2396void octeon_irq_ciu3_enable(struct irq_data *data)
   2397{
   2398	int cpu;
   2399	union cvmx_ciu3_iscx_ctl isc_ctl;
   2400	union cvmx_ciu3_iscx_w1c isc_w1c;
   2401	u64 isc_ctl_addr;
   2402
   2403	struct octeon_ciu_chip_data *cd;
   2404
   2405	cpu = next_cpu_for_irq(data);
   2406
   2407	cd = irq_data_get_irq_chip_data(data);
   2408
   2409	isc_w1c.u64 = 0;
   2410	isc_w1c.s.en = 1;
   2411	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
   2412
   2413	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
   2414	isc_ctl.u64 = 0;
   2415	isc_ctl.s.en = 1;
   2416	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
   2417	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
   2418	cvmx_read_csr(isc_ctl_addr);
   2419}
   2420
   2421void octeon_irq_ciu3_disable(struct irq_data *data)
   2422{
   2423	u64 isc_ctl_addr;
   2424	union cvmx_ciu3_iscx_w1c isc_w1c;
   2425
   2426	struct octeon_ciu_chip_data *cd;
   2427
   2428	cd = irq_data_get_irq_chip_data(data);
   2429
   2430	isc_w1c.u64 = 0;
   2431	isc_w1c.s.en = 1;
   2432
   2433	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
   2434	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
   2435	cvmx_write_csr(isc_ctl_addr, 0);
   2436	cvmx_read_csr(isc_ctl_addr);
   2437}
   2438
   2439void octeon_irq_ciu3_ack(struct irq_data *data)
   2440{
   2441	u64 isc_w1c_addr;
   2442	union cvmx_ciu3_iscx_w1c isc_w1c;
   2443	struct octeon_ciu_chip_data *cd;
   2444	u32 trigger_type = irqd_get_trigger_type(data);
   2445
    2446	/*
    2447	 * We use a single irq_chip, so nothing needs to be done to ack
    2448	 * a level interrupt.
    2449	 */
   2450	if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
   2451		return;
   2452
   2453	cd = irq_data_get_irq_chip_data(data);
   2454
   2455	isc_w1c.u64 = 0;
   2456	isc_w1c.s.raw = 1;
   2457
   2458	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
   2459	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
   2460	cvmx_read_csr(isc_w1c_addr);
   2461}
   2462
   2463void octeon_irq_ciu3_mask(struct irq_data *data)
   2464{
   2465	union cvmx_ciu3_iscx_w1c isc_w1c;
   2466	u64 isc_w1c_addr;
   2467	struct octeon_ciu_chip_data *cd;
   2468
   2469	cd = irq_data_get_irq_chip_data(data);
   2470
   2471	isc_w1c.u64 = 0;
   2472	isc_w1c.s.en = 1;
   2473
   2474	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
   2475	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
   2476	cvmx_read_csr(isc_w1c_addr);
   2477}
   2478
   2479void octeon_irq_ciu3_mask_ack(struct irq_data *data)
   2480{
   2481	union cvmx_ciu3_iscx_w1c isc_w1c;
   2482	u64 isc_w1c_addr;
   2483	struct octeon_ciu_chip_data *cd;
   2484	u32 trigger_type = irqd_get_trigger_type(data);
   2485
   2486	cd = irq_data_get_irq_chip_data(data);
   2487
   2488	isc_w1c.u64 = 0;
   2489	isc_w1c.s.en = 1;
   2490
   2491	/*
   2492	 * We use a single irq_chip, so only ack an edge (!level)
   2493	 * interrupt.
   2494	 */
   2495	if (trigger_type & IRQ_TYPE_EDGE_BOTH)
   2496		isc_w1c.s.raw = 1;
   2497
   2498	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
   2499	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
   2500	cvmx_read_csr(isc_w1c_addr);
   2501}
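
/*
 * All of the CIU3 mask/ack paths above funnel through the per-source
 * ISC W1C register: writing 1 to "en" masks the source, writing 1 to
 * "raw" clears a latched edge, and the trailing cvmx_read_csr()
 * flushes the write.  A minimal sketch composing the two in one word
 * (example_* is a hypothetical name):
 */
#if 0
static void example_ciu3_mask_and_ack(u64 ciu3_addr, unsigned int intsn)
{
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 addr = ciu3_addr + CIU3_ISC_W1C(intsn);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;	/* mask the source */
	isc_w1c.s.raw = 1;	/* and ack any latched edge */
	cvmx_write_csr(addr, isc_w1c.u64);
	cvmx_read_csr(addr);	/* make sure the write has retired */
}
#endif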
   2502
   2503#ifdef CONFIG_SMP
   2504static int octeon_irq_ciu3_set_affinity(struct irq_data *data,
   2505					const struct cpumask *dest, bool force)
   2506{
   2507	union cvmx_ciu3_iscx_ctl isc_ctl;
   2508	union cvmx_ciu3_iscx_w1c isc_w1c;
   2509	u64 isc_ctl_addr;
   2510	int cpu;
   2511	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
   2512	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
   2513
   2514	if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
   2515		return -EINVAL;
   2516
   2517	if (!enable_one)
   2518		return IRQ_SET_MASK_OK;
   2519
   2521	cpu = cpumask_first(dest);
   2522	if (cpu >= nr_cpu_ids)
   2523		cpu = smp_processor_id();
   2524	cd->current_cpu = cpu;
   2525
   2526	isc_w1c.u64 = 0;
   2527	isc_w1c.s.en = 1;
   2528	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
   2529
   2530	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
   2531	isc_ctl.u64 = 0;
   2532	isc_ctl.s.en = 1;
   2533	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
   2534	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
   2535	cvmx_read_csr(isc_ctl_addr);
   2536
   2537	return IRQ_SET_MASK_OK;
   2538}
   2539#endif
   2540
   2541static struct irq_chip octeon_irq_chip_ciu3 = {
   2542	.name = "CIU3",
   2543	.irq_startup = edge_startup,
   2544	.irq_enable = octeon_irq_ciu3_enable,
   2545	.irq_disable = octeon_irq_ciu3_disable,
   2546	.irq_ack = octeon_irq_ciu3_ack,
   2547	.irq_mask = octeon_irq_ciu3_mask,
   2548	.irq_mask_ack = octeon_irq_ciu3_mask_ack,
   2549	.irq_unmask = octeon_irq_ciu3_enable,
   2550	.irq_set_type = octeon_irq_ciu_set_type,
   2551#ifdef CONFIG_SMP
   2552	.irq_set_affinity = octeon_irq_ciu3_set_affinity,
   2553	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
   2554#endif
   2555};
   2556
   2557int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
   2558			 irq_hw_number_t hw, struct irq_chip *chip)
   2559{
   2560	struct octeon_ciu3_info *ciu3_info = d->host_data;
   2561	struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
   2562						       ciu3_info->node);
   2563	if (!cd)
   2564		return -ENOMEM;
   2565	cd->intsn = hw;
   2566	cd->current_cpu = -1;
   2567	cd->ciu3_addr = ciu3_info->ciu3_addr;
   2568	cd->ciu_node = ciu3_info->node;
   2569	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
   2570	irq_set_chip_data(virq, cd);
   2571
   2572	return 0;
   2573}
   2574
   2575static int octeon_irq_ciu3_map(struct irq_domain *d,
   2576			       unsigned int virq, irq_hw_number_t hw)
   2577{
   2578	return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
   2579}
   2580
   2581static const struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
   2582	.map = octeon_irq_ciu3_map,
   2583	.unmap = octeon_irq_free_cd,
   2584	.xlate = octeon_irq_ciu3_xlat,
   2585};
   2586
   2587static void octeon_irq_ciu3_ip2(void)
   2588{
   2589	union cvmx_ciu3_destx_pp_int dest_pp_int;
   2590	struct octeon_ciu3_info *ciu3_info;
   2591	u64 ciu3_addr;
   2592
   2593	ciu3_info = __this_cpu_read(octeon_ciu3_info);
   2594	ciu3_addr = ciu3_info->ciu3_addr;
   2595
   2596	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));
   2597
   2598	if (likely(dest_pp_int.s.intr)) {
   2599		irq_hw_number_t intsn = dest_pp_int.s.intsn;
   2600		irq_hw_number_t hw;
   2601		struct irq_domain *domain;
   2602		/* Get the domain to use from the major block */
   2603		int block = intsn >> 12;
   2604		int ret;
   2605
   2606		domain = ciu3_info->domain[block];
   2607		if (ciu3_info->intsn2hw[block])
   2608			hw = ciu3_info->intsn2hw[block](domain, intsn);
   2609		else
   2610			hw = intsn;
   2611
   2612		irq_enter();
   2613		ret = generic_handle_domain_irq(domain, hw);
   2614		irq_exit();
   2615
   2616		if (ret < 0) {
   2617			union cvmx_ciu3_iscx_w1c isc_w1c;
   2618			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
   2619
   2620			isc_w1c.u64 = 0;
   2621			isc_w1c.s.en = 1;
   2622			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
   2623			cvmx_read_csr(isc_w1c_addr);
   2624			spurious_interrupt();
   2625		}
   2626	} else {
   2627		spurious_interrupt();
   2628	}
   2629}
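
/*
 * Each core owns consecutive DEST_PP_INT slots: 3 * core for IP2 and
 * 3 * core + 1 for IP3 (see octeon_irq_ciu3_mbox below), with IP4
 * presumably at 3 * core + 2 by the same pattern.  Reading a slot
 * returns both a pending flag and the winning intsn, so dispatch needs
 * no summary scan.  A sketch of the slot arithmetic (example_* is a
 * hypothetical name):
 */
#if 0
static u64 example_ciu3_dest_pp_addr(u64 ciu3_addr, int core, int ip)
{
	/* ip: 0 for IP2, 1 for IP3, 2 for IP4. */
	return ciu3_addr + CIU3_DEST_PP_INT(3 * core + ip);
}
#endif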
   2630
    2631/*
    2632 * 10 mboxes per core, starting from zero.
    2633 * The base mbox intsn for a core is core * 10 within the SW block.
    2634 */
   2635static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
   2636{
   2637	/* SW (mbox) are 0x04 in bits 12..19 */
   2638	return 0x04000 + CIU3_MBOX_PER_CORE * core;
   2639}
   2640
   2641static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox)
   2642{
   2643	return octeon_irq_ciu3_base_mbox_intsn(core) + mbox;
   2644}
   2645
   2646static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
   2647{
   2648	int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;
   2649
   2650	return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
   2651}
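
/*
 * Putting the three helpers together: mbox intsn = 0x04000 + core * 10
 * + mbox.  Only the low 6 bits of the coreid select the core; the node
 * is implied by which CIU3's registers the intsn is written to.  A
 * worked example:
 */
#if 0
static void example_ciu3_mbox_intsn(void)
{
	unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(2, 3);
	/* intsn == 0x04000 + 2 * 10 + 3 == 0x04017 */
}
#endif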
   2652
   2653static void octeon_irq_ciu3_mbox(void)
   2654{
   2655	union cvmx_ciu3_destx_pp_int dest_pp_int;
   2656	struct octeon_ciu3_info *ciu3_info;
   2657	u64 ciu3_addr;
   2658	int core = cvmx_get_local_core_num();
   2659
   2660	ciu3_info = __this_cpu_read(octeon_ciu3_info);
   2661	ciu3_addr = ciu3_info->ciu3_addr;
   2662
   2663	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));
   2664
   2665	if (likely(dest_pp_int.s.intr)) {
   2666		irq_hw_number_t intsn = dest_pp_int.s.intsn;
   2667		int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);
   2668
   2669		if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
   2670			do_IRQ(mbox + OCTEON_IRQ_MBOX0);
   2671		} else {
   2672			union cvmx_ciu3_iscx_w1c isc_w1c;
   2673			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
   2674
   2675			isc_w1c.u64 = 0;
   2676			isc_w1c.s.en = 1;
   2677			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
   2678			cvmx_read_csr(isc_w1c_addr);
   2679			spurious_interrupt();
   2680		}
   2681	} else {
   2682		spurious_interrupt();
   2683	}
   2684}
   2685
   2686void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
   2687{
   2688	struct octeon_ciu3_info *ciu3_info;
   2689	unsigned int intsn;
   2690	union cvmx_ciu3_iscx_w1s isc_w1s;
   2691	u64 isc_w1s_addr;
   2692
   2693	if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
   2694		return;
   2695
   2696	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
   2697	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
   2698	isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);
   2699
   2700	isc_w1s.u64 = 0;
   2701	isc_w1s.s.raw = 1;
   2702
   2703	cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
   2704	cvmx_read_csr(isc_w1s_addr);
   2705}
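
/*
 * octeon_ciu3_mbox_send() is the CIU3 IPI primitive: setting "raw" via
 * the W1S alias latches the target core's mbox source, which is then
 * delivered through octeon_irq_ciu3_mbox() above.  A hedged usage
 * sketch (the SMP glue that actually calls this lives outside this
 * file, and example_send_ipi is a hypothetical caller):
 */
#if 0
static void example_send_ipi(int cpu)
{
	/* Kick mailbox 0 on the target cpu; it is acked in the handler. */
	octeon_ciu3_mbox_send(cpu, 0);
}
#endif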
   2706
   2707static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
   2708{
   2709	struct octeon_ciu3_info *ciu3_info;
   2710	unsigned int intsn;
   2711	u64 isc_ctl_addr, isc_w1c_addr;
   2712	union cvmx_ciu3_iscx_ctl isc_ctl;
   2713	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
   2714
   2715	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
   2716	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
   2717	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
   2718	isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);
   2719
   2720	isc_ctl.u64 = 0;
   2721	isc_ctl.s.en = 1;
   2722
   2723	cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
   2724	cvmx_write_csr(isc_ctl_addr, 0);
   2725	if (en) {
   2726		unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
   2727
   2728		isc_ctl.u64 = 0;
   2729		isc_ctl.s.en = 1;
   2730		isc_ctl.s.idt = idt;
   2731		cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
   2732	}
   2733	cvmx_read_csr(isc_ctl_addr);
   2734}
   2735
   2736static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
   2737{
   2738	int cpu;
   2739	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
   2740
   2741	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
   2742
   2743	for_each_online_cpu(cpu)
   2744		octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
   2745}
   2746
   2747static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
   2748{
   2749	int cpu;
   2750	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
   2751
   2752	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
   2753
   2754	for_each_online_cpu(cpu)
   2755		octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
   2756}
   2757
   2758static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
   2759{
   2760	struct octeon_ciu3_info *ciu3_info;
   2761	unsigned int intsn;
   2762	u64 isc_w1c_addr;
   2763	union cvmx_ciu3_iscx_w1c isc_w1c;
   2764	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
   2765
   2766	intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);
   2767
   2768	isc_w1c.u64 = 0;
   2769	isc_w1c.s.raw = 1;
   2770
   2771	ciu3_info = __this_cpu_read(octeon_ciu3_info);
   2772	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
   2773	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
   2774	cvmx_read_csr(isc_w1c_addr);
   2775}
   2776
   2777static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
   2778{
   2779	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
   2780}
   2781
   2782static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
   2783{
   2784	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
   2785}
   2786
   2787static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
   2788{
   2789	u64 b = ciu3_info->ciu3_addr;
   2790	int idt_ip2, idt_ip3, idt_ip4;
   2791	int unused_idt2;
   2792	int core = cvmx_get_local_core_num();
   2793	int i;
   2794
   2795	__this_cpu_write(octeon_ciu3_info, ciu3_info);
   2796
    2797	/*
    2798	 * 4 IDTs per core, starting from 1 because zero is reserved.
    2799	 * The base IDT for a core is 4 * core + 1.
    2800	 */
   2801	idt_ip2 = core * 4 + 1;
   2802	idt_ip3 = core * 4 + 2;
   2803	idt_ip4 = core * 4 + 3;
   2804	unused_idt2 = core * 4 + 4;
   2805	__this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
   2806	__this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);
   2807
   2808	/* ip2 interrupts for this CPU */
   2809	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
   2810	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
   2811	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);
   2812
   2813	/* ip3 interrupts for this CPU */
   2814	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
   2815	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
   2816	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);
   2817
   2818	/* ip4 interrupts for this CPU */
   2819	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
   2820	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
   2821	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);
   2822
   2823	cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
   2824	cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
   2825	cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);
   2826
   2827	for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
   2828		unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);
   2829
   2830		cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
   2831		cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
   2832	}
   2833
   2834	return 0;
   2835}
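
/*
 * With four IDTs per core the table indices are pure arithmetic: for
 * local core 5, idt_ip2/ip3/ip4 are 21, 22 and 23 and IDT 24 is parked
 * unused.  The CTL writes above (0, 1, 2) evidently select which CPU
 * interrupt line the IDT raises, while the PP mask picks the target
 * core.  A sketch of the index math (example_* is hypothetical):
 */
#if 0
static void example_ciu3_idt_layout(int core)
{
	int idt_ip2 = core * 4 + 1;	/* core 5 -> 21 */
	int idt_ip3 = core * 4 + 2;	/* core 5 -> 22 */
	int idt_ip4 = core * 4 + 3;	/* core 5 -> 23 */
}
#endif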
   2836
   2837static void octeon_irq_setup_secondary_ciu3(void)
   2838{
   2839	struct octeon_ciu3_info *ciu3_info;
   2840
   2841	ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
   2842	octeon_irq_ciu3_alloc_resources(ciu3_info);
   2843	irq_cpu_online();
   2844
   2845	/* Enable the CIU lines */
   2846	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
   2847	if (octeon_irq_use_ip4)
   2848		set_c0_status(STATUSF_IP4);
   2849	else
   2850		clear_c0_status(STATUSF_IP4);
   2851}
   2852
   2853static struct irq_chip octeon_irq_chip_ciu3_mbox = {
   2854	.name = "CIU3-M",
   2855	.irq_enable = octeon_irq_ciu3_mbox_enable,
   2856	.irq_disable = octeon_irq_ciu3_mbox_disable,
   2857	.irq_ack = octeon_irq_ciu3_mbox_ack,
   2858
   2859	.irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
   2860	.irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
   2861	.flags = IRQCHIP_ONOFFLINE_ENABLED,
   2862};
   2863
   2864static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
   2865				       struct device_node *parent)
   2866{
   2867	int i;
   2868	int node;
   2869	struct irq_domain *domain;
   2870	struct octeon_ciu3_info *ciu3_info;
   2871	const __be32 *zero_addr;
   2872	u64 base_addr;
   2873	union cvmx_ciu3_const consts;
   2874
   2875	node = 0; /* of_node_to_nid(ciu_node); */
   2876	ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);
   2877
   2878	if (!ciu3_info)
   2879		return -ENOMEM;
   2880
   2881	zero_addr = of_get_address(ciu_node, 0, NULL, NULL);
   2882	if (WARN_ON(!zero_addr))
   2883		return -EINVAL;
   2884
   2885	base_addr = of_translate_address(ciu_node, zero_addr);
   2886	base_addr = (u64)phys_to_virt(base_addr);
   2887
   2888	ciu3_info->ciu3_addr = base_addr;
   2889	ciu3_info->node = node;
   2890
   2891	consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);
   2892
   2893	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;
   2894
   2895	octeon_irq_ip2 = octeon_irq_ciu3_ip2;
   2896	octeon_irq_ip3 = octeon_irq_ciu3_mbox;
   2897	octeon_irq_ip4 = octeon_irq_ip4_mask;
   2898
   2899	if (node == cvmx_get_node_num()) {
   2900		/* Mips internal */
   2901		octeon_irq_init_core();
   2902
   2903		/* Only do per CPU things if it is the CIU of the boot node. */
   2904		i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
   2905		WARN_ON(i < 0);
   2906
   2907		for (i = 0; i < 8; i++)
   2908			irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
   2909						 &octeon_irq_chip_ciu3_mbox, handle_percpu_irq);
   2910	}
   2911
   2912	/*
   2913	 * Initialize all domains to use the default domain. Specific major
   2914	 * blocks will overwrite the default domain as needed.
   2915	 */
   2916	domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
   2917				     ciu3_info);
   2918	for (i = 0; i < MAX_CIU3_DOMAINS; i++)
   2919		ciu3_info->domain[i] = domain;
   2920
   2921	octeon_ciu3_info_per_node[node] = ciu3_info;
   2922
   2923	if (node == cvmx_get_node_num()) {
   2924		/* Only do per CPU things if it is the CIU of the boot node. */
   2925		octeon_irq_ciu3_alloc_resources(ciu3_info);
   2926		if (node == 0)
   2927			irq_set_default_host(domain);
   2928
   2929		octeon_irq_use_ip4 = false;
   2930		/* Enable the CIU lines */
   2931		set_c0_status(STATUSF_IP2 | STATUSF_IP3);
   2932		clear_c0_status(STATUSF_IP4);
   2933	}
   2934
   2935	return 0;
   2936}
   2937
   2938static struct of_device_id ciu_types[] __initdata = {
   2939	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
   2940	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
   2941	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
   2942	{.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
   2943	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
   2944	{}
   2945};
   2946
   2947void __init arch_init_irq(void)
   2948{
   2949#ifdef CONFIG_SMP
   2950	/* Set the default affinity to the boot cpu. */
   2951	cpumask_clear(irq_default_affinity);
   2952	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
   2953#endif
   2954	of_irq_init(ciu_types);
   2955}
   2956
   2957asmlinkage void plat_irq_dispatch(void)
   2958{
   2959	unsigned long cop0_cause;
   2960	unsigned long cop0_status;
   2961
   2962	while (1) {
   2963		cop0_cause = read_c0_cause();
   2964		cop0_status = read_c0_status();
   2965		cop0_cause &= cop0_status;
   2966		cop0_cause &= ST0_IM;
   2967
   2968		if (cop0_cause & STATUSF_IP2)
   2969			octeon_irq_ip2();
   2970		else if (cop0_cause & STATUSF_IP3)
   2971			octeon_irq_ip3();
   2972		else if (cop0_cause & STATUSF_IP4)
   2973			octeon_irq_ip4();
   2974		else if (cop0_cause)
   2975			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
   2976		else
   2977			break;
   2978	}
   2979}
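
/*
 * The fallback do_IRQ() line above decodes the remaining MIPS cause
 * bits: the IM field occupies cause bits 15..8 and fls() is 1-based,
 * so a pending IPn yields fls(...) - 9 == n.  A worked example
 * (example_* is a hypothetical name):
 */
#if 0
static int example_decode_cause(unsigned long cause_im)
{
	/* cause_im == STATUSF_IP2 == 1 << 10: fls() == 11, 11 - 9 == 2 */
	return fls(cause_im) - 9 + MIPS_CPU_IRQ_BASE;
}
#endif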
   2980
   2981#ifdef CONFIG_HOTPLUG_CPU
   2982
   2983void octeon_fixup_irqs(void)
   2984{
   2985	irq_cpu_offline();
   2986}
   2987
   2988#endif /* CONFIG_HOTPLUG_CPU */
   2989
   2990struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block)
   2991{
   2992	struct octeon_ciu3_info *ciu3_info;
   2993
   2994	ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK];
   2995	return ciu3_info->domain[block];
   2996}
   2997EXPORT_SYMBOL(octeon_irq_get_block_domain);