cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

chip.c (41211B)


      1// SPDX-License-Identifier: GPL-2.0
      2/*
      3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
      4 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
      5 *
      6 * This file contains the core interrupt handling code, for irq-chip based
      7 * architectures. Detailed information is available in
      8 * Documentation/core-api/genericirq.rst
      9 */
     10
     11#include <linux/irq.h>
     12#include <linux/msi.h>
     13#include <linux/module.h>
     14#include <linux/interrupt.h>
     15#include <linux/kernel_stat.h>
     16#include <linux/irqdomain.h>
     17
     18#include <trace/events/irq.h>
     19
     20#include "internals.h"
     21
     22static irqreturn_t bad_chained_irq(int irq, void *dev_id)
     23{
     24	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
     25	return IRQ_NONE;
     26}
     27
     28/*
     29 * Chained handlers should never call action on their IRQ. This default
      30 * action will emit a warning if such a thing happens.
     31 */
     32struct irqaction chained_action = {
     33	.handler = bad_chained_irq,
     34};
     35
     36/**
     37 *	irq_set_chip - set the irq chip for an irq
     38 *	@irq:	irq number
     39 *	@chip:	pointer to irq chip description structure
     40 */
     41int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
     42{
     43	unsigned long flags;
     44	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
     45
     46	if (!desc)
     47		return -EINVAL;
     48
     49	desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
     50	irq_put_desc_unlock(desc, flags);
     51	/*
     52	 * For !CONFIG_SPARSE_IRQ make the irq show up in
     53	 * allocated_irqs.
     54	 */
     55	irq_mark_irq(irq);
     56	return 0;
     57}
     58EXPORT_SYMBOL(irq_set_chip);
     59
     60/**
     61 *	irq_set_irq_type - set the irq trigger type for an irq
     62 *	@irq:	irq number
     63 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
     64 */
     65int irq_set_irq_type(unsigned int irq, unsigned int type)
     66{
     67	unsigned long flags;
     68	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
     69	int ret = 0;
     70
     71	if (!desc)
     72		return -EINVAL;
     73
     74	ret = __irq_set_trigger(desc, type);
     75	irq_put_desc_busunlock(desc, flags);
     76	return ret;
     77}
     78EXPORT_SYMBOL(irq_set_irq_type);
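
/*
 * Illustrative usage sketch (not part of upstream chip.c): a driver selecting
 * the trigger type of a line it is about to request. The FOO_IRQ number is an
 * assumption for illustration only; real drivers obtain it from firmware or
 * platform resources.
 */
#include <linux/irq.h>

#define FOO_IRQ	42	/* assumed interrupt number */

static int foo_configure_trigger(void)
{
	/* Latch on the rising edge; see the IRQ_TYPE_* values in include/linux/irq.h */
	return irq_set_irq_type(FOO_IRQ, IRQ_TYPE_EDGE_RISING);
}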
     79
     80/**
     81 *	irq_set_handler_data - set irq handler data for an irq
     82 *	@irq:	Interrupt number
     83 *	@data:	Pointer to interrupt specific data
     84 *
     85 *	Set the hardware irq controller data for an irq
     86 */
     87int irq_set_handler_data(unsigned int irq, void *data)
     88{
     89	unsigned long flags;
     90	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
     91
     92	if (!desc)
     93		return -EINVAL;
     94	desc->irq_common_data.handler_data = data;
     95	irq_put_desc_unlock(desc, flags);
     96	return 0;
     97}
     98EXPORT_SYMBOL(irq_set_handler_data);
     99
    100/**
    101 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
    102 *	@irq_base:	Interrupt number base
    103 *	@irq_offset:	Interrupt number offset
    104 *	@entry:		Pointer to MSI descriptor data
    105 *
    106 *	Set the MSI descriptor entry for an irq at offset
    107 */
    108int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
    109			 struct msi_desc *entry)
    110{
    111	unsigned long flags;
    112	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
    113
    114	if (!desc)
    115		return -EINVAL;
    116	desc->irq_common_data.msi_desc = entry;
    117	if (entry && !irq_offset)
    118		entry->irq = irq_base;
    119	irq_put_desc_unlock(desc, flags);
    120	return 0;
    121}
    122
    123/**
    124 *	irq_set_msi_desc - set MSI descriptor data for an irq
    125 *	@irq:	Interrupt number
    126 *	@entry:	Pointer to MSI descriptor data
    127 *
    128 *	Set the MSI descriptor entry for an irq
    129 */
    130int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
    131{
    132	return irq_set_msi_desc_off(irq, 0, entry);
    133}
    134
    135/**
    136 *	irq_set_chip_data - set irq chip data for an irq
    137 *	@irq:	Interrupt number
    138 *	@data:	Pointer to chip specific data
    139 *
    140 *	Set the hardware irq chip data for an irq
    141 */
    142int irq_set_chip_data(unsigned int irq, void *data)
    143{
    144	unsigned long flags;
    145	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
    146
    147	if (!desc)
    148		return -EINVAL;
    149	desc->irq_data.chip_data = data;
    150	irq_put_desc_unlock(desc, flags);
    151	return 0;
    152}
    153EXPORT_SYMBOL(irq_set_chip_data);
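
/*
 * Illustrative sketch (not part of upstream chip.c): pairing irq_set_chip()
 * and irq_set_chip_data() so the chip callbacks can reach their per-instance
 * state via irq_data_get_irq_chip_data(). The foo_* names and the register
 * layout are assumptions for illustration only.
 */
#include <linux/irq.h>
#include <linux/io.h>

struct foo_priv {
	void __iomem *base;	/* assumed: mask register at offset 0x0 */
};

static void foo_irq_mask(struct irq_data *d)
{
	struct foo_priv *priv = irq_data_get_irq_chip_data(d);

	writel_relaxed(1, priv->base + 0x0);
}

static void foo_irq_unmask(struct irq_data *d)
{
	struct foo_priv *priv = irq_data_get_irq_chip_data(d);

	writel_relaxed(0, priv->base + 0x0);
}

static struct irq_chip foo_chip = {
	.name		= "foo",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
};

/* Typically called from the driver's probe path for a line it owns */
static int foo_setup_irq(unsigned int irq, struct foo_priv *priv)
{
	int ret = irq_set_chip(irq, &foo_chip);

	if (ret)
		return ret;
	return irq_set_chip_data(irq, priv);
}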
    154
    155struct irq_data *irq_get_irq_data(unsigned int irq)
    156{
    157	struct irq_desc *desc = irq_to_desc(irq);
    158
    159	return desc ? &desc->irq_data : NULL;
    160}
    161EXPORT_SYMBOL_GPL(irq_get_irq_data);
    162
    163static void irq_state_clr_disabled(struct irq_desc *desc)
    164{
    165	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
    166}
    167
    168static void irq_state_clr_masked(struct irq_desc *desc)
    169{
    170	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
    171}
    172
    173static void irq_state_clr_started(struct irq_desc *desc)
    174{
    175	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
    176}
    177
    178static void irq_state_set_started(struct irq_desc *desc)
    179{
    180	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
    181}
    182
    183enum {
    184	IRQ_STARTUP_NORMAL,
    185	IRQ_STARTUP_MANAGED,
    186	IRQ_STARTUP_ABORT,
    187};
    188
    189#ifdef CONFIG_SMP
    190static int
    191__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
    192{
    193	struct irq_data *d = irq_desc_get_irq_data(desc);
    194
    195	if (!irqd_affinity_is_managed(d))
    196		return IRQ_STARTUP_NORMAL;
    197
    198	irqd_clr_managed_shutdown(d);
    199
    200	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
    201		/*
    202		 * Catch code which fiddles with enable_irq() on a managed
    203		 * and potentially shutdown IRQ. Chained interrupt
    204		 * installment or irq auto probing should not happen on
    205		 * managed irqs either.
    206		 */
    207		if (WARN_ON_ONCE(force))
    208			return IRQ_STARTUP_ABORT;
    209		/*
    210		 * The interrupt was requested, but there is no online CPU
     211		 * in its affinity mask. Put it into managed shutdown
    212		 * state and let the cpu hotplug mechanism start it up once
    213		 * a CPU in the mask becomes available.
    214		 */
    215		return IRQ_STARTUP_ABORT;
    216	}
    217	/*
    218	 * Managed interrupts have reserved resources, so this should not
    219	 * happen.
    220	 */
    221	if (WARN_ON(irq_domain_activate_irq(d, false)))
    222		return IRQ_STARTUP_ABORT;
    223	return IRQ_STARTUP_MANAGED;
    224}
    225#else
    226static __always_inline int
    227__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
    228{
    229	return IRQ_STARTUP_NORMAL;
    230}
    231#endif
    232
    233static int __irq_startup(struct irq_desc *desc)
    234{
    235	struct irq_data *d = irq_desc_get_irq_data(desc);
    236	int ret = 0;
    237
    238	/* Warn if this interrupt is not activated but try nevertheless */
    239	WARN_ON_ONCE(!irqd_is_activated(d));
    240
    241	if (d->chip->irq_startup) {
    242		ret = d->chip->irq_startup(d);
    243		irq_state_clr_disabled(desc);
    244		irq_state_clr_masked(desc);
    245	} else {
    246		irq_enable(desc);
    247	}
    248	irq_state_set_started(desc);
    249	return ret;
    250}
    251
    252int irq_startup(struct irq_desc *desc, bool resend, bool force)
    253{
    254	struct irq_data *d = irq_desc_get_irq_data(desc);
    255	struct cpumask *aff = irq_data_get_affinity_mask(d);
    256	int ret = 0;
    257
    258	desc->depth = 0;
    259
    260	if (irqd_is_started(d)) {
    261		irq_enable(desc);
    262	} else {
    263		switch (__irq_startup_managed(desc, aff, force)) {
    264		case IRQ_STARTUP_NORMAL:
    265			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
    266				irq_setup_affinity(desc);
    267			ret = __irq_startup(desc);
    268			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
    269				irq_setup_affinity(desc);
    270			break;
    271		case IRQ_STARTUP_MANAGED:
    272			irq_do_set_affinity(d, aff, false);
    273			ret = __irq_startup(desc);
    274			break;
    275		case IRQ_STARTUP_ABORT:
    276			irqd_set_managed_shutdown(d);
    277			return 0;
    278		}
    279	}
    280	if (resend)
    281		check_irq_resend(desc, false);
    282
    283	return ret;
    284}
    285
    286int irq_activate(struct irq_desc *desc)
    287{
    288	struct irq_data *d = irq_desc_get_irq_data(desc);
    289
    290	if (!irqd_affinity_is_managed(d))
    291		return irq_domain_activate_irq(d, false);
    292	return 0;
    293}
    294
    295int irq_activate_and_startup(struct irq_desc *desc, bool resend)
    296{
    297	if (WARN_ON(irq_activate(desc)))
    298		return 0;
    299	return irq_startup(desc, resend, IRQ_START_FORCE);
    300}
    301
    302static void __irq_disable(struct irq_desc *desc, bool mask);
    303
    304void irq_shutdown(struct irq_desc *desc)
    305{
    306	if (irqd_is_started(&desc->irq_data)) {
    307		desc->depth = 1;
    308		if (desc->irq_data.chip->irq_shutdown) {
    309			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
    310			irq_state_set_disabled(desc);
    311			irq_state_set_masked(desc);
    312		} else {
    313			__irq_disable(desc, true);
    314		}
    315		irq_state_clr_started(desc);
    316	}
    317}
    318
    319
    320void irq_shutdown_and_deactivate(struct irq_desc *desc)
    321{
    322	irq_shutdown(desc);
    323	/*
    324	 * This must be called even if the interrupt was never started up,
    325	 * because the activation can happen before the interrupt is
     326	 * available for request/startup. It has its own state tracking so
    327	 * it's safe to call it unconditionally.
    328	 */
    329	irq_domain_deactivate_irq(&desc->irq_data);
    330}
    331
    332void irq_enable(struct irq_desc *desc)
    333{
    334	if (!irqd_irq_disabled(&desc->irq_data)) {
    335		unmask_irq(desc);
    336	} else {
    337		irq_state_clr_disabled(desc);
    338		if (desc->irq_data.chip->irq_enable) {
    339			desc->irq_data.chip->irq_enable(&desc->irq_data);
    340			irq_state_clr_masked(desc);
    341		} else {
    342			unmask_irq(desc);
    343		}
    344	}
    345}
    346
    347static void __irq_disable(struct irq_desc *desc, bool mask)
    348{
    349	if (irqd_irq_disabled(&desc->irq_data)) {
    350		if (mask)
    351			mask_irq(desc);
    352	} else {
    353		irq_state_set_disabled(desc);
    354		if (desc->irq_data.chip->irq_disable) {
    355			desc->irq_data.chip->irq_disable(&desc->irq_data);
    356			irq_state_set_masked(desc);
    357		} else if (mask) {
    358			mask_irq(desc);
    359		}
    360	}
    361}
    362
    363/**
    364 * irq_disable - Mark interrupt disabled
    365 * @desc:	irq descriptor which should be disabled
    366 *
    367 * If the chip does not implement the irq_disable callback, we
    368 * use a lazy disable approach. That means we mark the interrupt
    369 * disabled, but leave the hardware unmasked. That's an
    370 * optimization because we avoid the hardware access for the
    371 * common case where no interrupt happens after we marked it
    372 * disabled. If an interrupt happens, then the interrupt flow
    373 * handler masks the line at the hardware level and marks it
    374 * pending.
    375 *
    376 * If the interrupt chip does not implement the irq_disable callback,
    377 * a driver can disable the lazy approach for a particular irq line by
    378 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
    379 * be used for devices which cannot disable the interrupt at the
    380 * device level under certain circumstances and have to use
    381 * disable_irq[_nosync] instead.
    382 */
    383void irq_disable(struct irq_desc *desc)
    384{
    385	__irq_disable(desc, irq_settings_disable_unlazy(desc));
    386}
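
/*
 * Illustrative sketch (not part of upstream chip.c): opting out of the lazy
 * disable behaviour described above for one line, as a driver might do when
 * the device cannot tolerate even a single interrupt arriving after
 * disable_irq(). The irq number is passed in by the caller; the foo_ name is
 * an assumption for illustration only.
 */
#include <linux/irq.h>
#include <linux/interrupt.h>

static void foo_force_unlazy_disable(unsigned int irq)
{
	/* Mask at the hardware level immediately on irq_disable() */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	/* From now on disable_irq() masks the line right away */
	disable_irq(irq);
}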
    387
    388void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
    389{
    390	if (desc->irq_data.chip->irq_enable)
    391		desc->irq_data.chip->irq_enable(&desc->irq_data);
    392	else
    393		desc->irq_data.chip->irq_unmask(&desc->irq_data);
    394	cpumask_set_cpu(cpu, desc->percpu_enabled);
    395}
    396
    397void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
    398{
    399	if (desc->irq_data.chip->irq_disable)
    400		desc->irq_data.chip->irq_disable(&desc->irq_data);
    401	else
    402		desc->irq_data.chip->irq_mask(&desc->irq_data);
    403	cpumask_clear_cpu(cpu, desc->percpu_enabled);
    404}
    405
    406static inline void mask_ack_irq(struct irq_desc *desc)
    407{
    408	if (desc->irq_data.chip->irq_mask_ack) {
    409		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
    410		irq_state_set_masked(desc);
    411	} else {
    412		mask_irq(desc);
    413		if (desc->irq_data.chip->irq_ack)
    414			desc->irq_data.chip->irq_ack(&desc->irq_data);
    415	}
    416}
    417
    418void mask_irq(struct irq_desc *desc)
    419{
    420	if (irqd_irq_masked(&desc->irq_data))
    421		return;
    422
    423	if (desc->irq_data.chip->irq_mask) {
    424		desc->irq_data.chip->irq_mask(&desc->irq_data);
    425		irq_state_set_masked(desc);
    426	}
    427}
    428
    429void unmask_irq(struct irq_desc *desc)
    430{
    431	if (!irqd_irq_masked(&desc->irq_data))
    432		return;
    433
    434	if (desc->irq_data.chip->irq_unmask) {
    435		desc->irq_data.chip->irq_unmask(&desc->irq_data);
    436		irq_state_clr_masked(desc);
    437	}
    438}
    439
    440void unmask_threaded_irq(struct irq_desc *desc)
    441{
    442	struct irq_chip *chip = desc->irq_data.chip;
    443
    444	if (chip->flags & IRQCHIP_EOI_THREADED)
    445		chip->irq_eoi(&desc->irq_data);
    446
    447	unmask_irq(desc);
    448}
    449
    450/*
     451 *	handle_nested_irq - Handle a nested irq from an irq thread
    452 *	@irq:	the interrupt number
    453 *
    454 *	Handle interrupts which are nested into a threaded interrupt
    455 *	handler. The handler function is called inside the calling
     456 *	thread's context.
    457 */
    458void handle_nested_irq(unsigned int irq)
    459{
    460	struct irq_desc *desc = irq_to_desc(irq);
    461	struct irqaction *action;
    462	irqreturn_t action_ret;
    463
    464	might_sleep();
    465
    466	raw_spin_lock_irq(&desc->lock);
    467
    468	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    469
    470	action = desc->action;
    471	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
    472		desc->istate |= IRQS_PENDING;
    473		goto out_unlock;
    474	}
    475
    476	kstat_incr_irqs_this_cpu(desc);
    477	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
    478	raw_spin_unlock_irq(&desc->lock);
    479
    480	action_ret = IRQ_NONE;
    481	for_each_action_of_desc(desc, action)
    482		action_ret |= action->thread_fn(action->irq, action->dev_id);
    483
    484	if (!irq_settings_no_debug(desc))
    485		note_interrupt(desc, action_ret);
    486
    487	raw_spin_lock_irq(&desc->lock);
    488	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
    489
    490out_unlock:
    491	raw_spin_unlock_irq(&desc->lock);
    492}
    493EXPORT_SYMBOL_GPL(handle_nested_irq);
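
/*
 * Illustrative sketch (not part of upstream chip.c): a threaded handler for a
 * slow-bus interrupt expander that demultiplexes its status register and
 * forwards each set bit as a nested irq. The foo_* names, the 16-bit status
 * register and the read_status() helper are assumptions for illustration.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>

struct foo_expander {
	struct irq_domain *domain;
	unsigned int (*read_status)(struct foo_expander *chip);	/* assumed */
};

static irqreturn_t foo_expander_thread(int irq, void *dev_id)
{
	struct foo_expander *chip = dev_id;
	unsigned long status = chip->read_status(chip);
	int bit;

	/* May sleep: we run in the parent's irq thread, not hard irq context */
	for_each_set_bit(bit, &status, 16)
		handle_nested_irq(irq_find_mapping(chip->domain, bit));

	return status ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * Requested with request_threaded_irq(parent_irq, NULL, foo_expander_thread,
 * IRQF_ONESHOT, "foo-expander", chip); the NULL primary handler makes the
 * core run only the thread function.
 */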
    494
    495static bool irq_check_poll(struct irq_desc *desc)
    496{
    497	if (!(desc->istate & IRQS_POLL_INPROGRESS))
    498		return false;
    499	return irq_wait_for_poll(desc);
    500}
    501
    502static bool irq_may_run(struct irq_desc *desc)
    503{
    504	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
    505
    506	/*
    507	 * If the interrupt is not in progress and is not an armed
    508	 * wakeup interrupt, proceed.
    509	 */
    510	if (!irqd_has_set(&desc->irq_data, mask))
    511		return true;
    512
    513	/*
    514	 * If the interrupt is an armed wakeup source, mark it pending
    515	 * and suspended, disable it and notify the pm core about the
    516	 * event.
    517	 */
    518	if (irq_pm_check_wakeup(desc))
    519		return false;
    520
    521	/*
    522	 * Handle a potential concurrent poll on a different core.
    523	 */
    524	return irq_check_poll(desc);
    525}
    526
    527/**
    528 *	handle_simple_irq - Simple and software-decoded IRQs.
    529 *	@desc:	the interrupt description structure for this irq
    530 *
    531 *	Simple interrupts are either sent from a demultiplexing interrupt
    532 *	handler or come from hardware, where no interrupt hardware control
    533 *	is necessary.
    534 *
    535 *	Note: The caller is expected to handle the ack, clear, mask and
    536 *	unmask issues if necessary.
    537 */
    538void handle_simple_irq(struct irq_desc *desc)
    539{
    540	raw_spin_lock(&desc->lock);
    541
    542	if (!irq_may_run(desc))
    543		goto out_unlock;
    544
    545	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    546
    547	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
    548		desc->istate |= IRQS_PENDING;
    549		goto out_unlock;
    550	}
    551
    552	kstat_incr_irqs_this_cpu(desc);
    553	handle_irq_event(desc);
    554
    555out_unlock:
    556	raw_spin_unlock(&desc->lock);
    557}
    558EXPORT_SYMBOL_GPL(handle_simple_irq);
    559
    560/**
     561 *	handle_untracked_irq - Simple and software-decoded IRQs without bookkeeping.
    562 *	@desc:	the interrupt description structure for this irq
    563 *
    564 *	Untracked interrupts are sent from a demultiplexing interrupt
     565 *	handler when the demultiplexer does not know which device in its
     566 *	multiplexed irq domain generated the interrupt. IRQs handled
    567 *	through here are not subjected to stats tracking, randomness, or
    568 *	spurious interrupt detection.
    569 *
    570 *	Note: Like handle_simple_irq, the caller is expected to handle
    571 *	the ack, clear, mask and unmask issues if necessary.
    572 */
    573void handle_untracked_irq(struct irq_desc *desc)
    574{
    575	raw_spin_lock(&desc->lock);
    576
    577	if (!irq_may_run(desc))
    578		goto out_unlock;
    579
    580	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    581
    582	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
    583		desc->istate |= IRQS_PENDING;
    584		goto out_unlock;
    585	}
    586
    587	desc->istate &= ~IRQS_PENDING;
    588	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
    589	raw_spin_unlock(&desc->lock);
    590
    591	__handle_irq_event_percpu(desc);
    592
    593	raw_spin_lock(&desc->lock);
    594	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
    595
    596out_unlock:
    597	raw_spin_unlock(&desc->lock);
    598}
    599EXPORT_SYMBOL_GPL(handle_untracked_irq);
    600
    601/*
    602 * Called unconditionally from handle_level_irq() and only for oneshot
    603 * interrupts from handle_fasteoi_irq()
    604 */
    605static void cond_unmask_irq(struct irq_desc *desc)
    606{
    607	/*
    608	 * We need to unmask in the following cases:
    609	 * - Standard level irq (IRQF_ONESHOT is not set)
    610	 * - Oneshot irq which did not wake the thread (caused by a
    611	 *   spurious interrupt or a primary handler handling it
    612	 *   completely).
    613	 */
    614	if (!irqd_irq_disabled(&desc->irq_data) &&
    615	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
    616		unmask_irq(desc);
    617}
    618
    619/**
    620 *	handle_level_irq - Level type irq handler
    621 *	@desc:	the interrupt description structure for this irq
    622 *
    623 *	Level type interrupts are active as long as the hardware line has
     624 *	the active level. This may require masking the interrupt and unmasking
     625 *	it after the associated handler has acknowledged the device, so that
     626 *	the interrupt line is back to inactive.
    627 */
    628void handle_level_irq(struct irq_desc *desc)
    629{
    630	raw_spin_lock(&desc->lock);
    631	mask_ack_irq(desc);
    632
    633	if (!irq_may_run(desc))
    634		goto out_unlock;
    635
    636	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    637
    638	/*
     639	 * If it's disabled or no action is available,
     640	 * keep it masked and get out of here.
    641	 */
    642	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
    643		desc->istate |= IRQS_PENDING;
    644		goto out_unlock;
    645	}
    646
    647	kstat_incr_irqs_this_cpu(desc);
    648	handle_irq_event(desc);
    649
    650	cond_unmask_irq(desc);
    651
    652out_unlock:
    653	raw_spin_unlock(&desc->lock);
    654}
    655EXPORT_SYMBOL_GPL(handle_level_irq);
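
/*
 * Illustrative sketch (not part of upstream chip.c): wiring a level-triggered
 * line to handle_level_irq() with irq_set_chip_and_handler(). foo_level_chip
 * and the irq number are assumptions; the chip is assumed to provide the
 * .irq_mask/.irq_unmask (and usually .irq_ack) callbacks this flow relies on.
 */
#include <linux/irq.h>

extern struct irq_chip foo_level_chip;	/* assumed, see earlier sketches */

static void foo_setup_level_irq(unsigned int irq)
{
	irq_set_chip_and_handler(irq, &foo_level_chip, handle_level_irq);
	/* Record the trigger type; this also sets IRQD_LEVEL for the line */
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
}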
    656
    657static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
    658{
    659	if (!(desc->istate & IRQS_ONESHOT)) {
    660		chip->irq_eoi(&desc->irq_data);
    661		return;
    662	}
    663	/*
    664	 * We need to unmask in the following cases:
    665	 * - Oneshot irq which did not wake the thread (caused by a
    666	 *   spurious interrupt or a primary handler handling it
    667	 *   completely).
    668	 */
    669	if (!irqd_irq_disabled(&desc->irq_data) &&
    670	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
    671		chip->irq_eoi(&desc->irq_data);
    672		unmask_irq(desc);
    673	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
    674		chip->irq_eoi(&desc->irq_data);
    675	}
    676}
    677
    678/**
    679 *	handle_fasteoi_irq - irq handler for transparent controllers
    680 *	@desc:	the interrupt description structure for this irq
    681 *
    682 *	Only a single callback will be issued to the chip: an ->eoi()
    683 *	call when the interrupt has been serviced. This enables support
    684 *	for modern forms of interrupt handlers, which handle the flow
    685 *	details in hardware, transparently.
    686 */
    687void handle_fasteoi_irq(struct irq_desc *desc)
    688{
    689	struct irq_chip *chip = desc->irq_data.chip;
    690
    691	raw_spin_lock(&desc->lock);
    692
    693	if (!irq_may_run(desc))
    694		goto out;
    695
    696	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    697
    698	/*
     699	 * If it's disabled or no action is available
     700	 * then mask it and get out of here:
    701	 */
    702	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
    703		desc->istate |= IRQS_PENDING;
    704		mask_irq(desc);
    705		goto out;
    706	}
    707
    708	kstat_incr_irqs_this_cpu(desc);
    709	if (desc->istate & IRQS_ONESHOT)
    710		mask_irq(desc);
    711
    712	handle_irq_event(desc);
    713
    714	cond_unmask_eoi_irq(desc, chip);
    715
    716	raw_spin_unlock(&desc->lock);
    717	return;
    718out:
    719	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
    720		chip->irq_eoi(&desc->irq_data);
    721	raw_spin_unlock(&desc->lock);
    722}
    723EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
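
/*
 * Illustrative sketch (not part of upstream chip.c): a chip for a controller
 * that only needs an EOI after servicing, paired with handle_fasteoi_irq().
 * The bar_* names, MMIO base and register offsets are assumptions for
 * illustration only.
 */
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bits.h>

static void __iomem *bar_base;	/* assumed controller MMIO base, set at probe */

static void bar_irq_eoi(struct irq_data *d)
{
	/* Assumed EOI register at offset 0x10, written with the hw irq number */
	writel_relaxed(irqd_to_hwirq(d), bar_base + 0x10);
}

static void bar_irq_mask(struct irq_data *d)
{
	writel_relaxed(BIT(irqd_to_hwirq(d)), bar_base + 0x04);	/* assumed */
}

static void bar_irq_unmask(struct irq_data *d)
{
	writel_relaxed(BIT(irqd_to_hwirq(d)), bar_base + 0x08);	/* assumed */
}

static struct irq_chip bar_chip = {
	.name		= "bar",
	.irq_mask	= bar_irq_mask,
	.irq_unmask	= bar_irq_unmask,
	.irq_eoi	= bar_irq_eoi,
};

/*
 * Each line would then be set up with
 * irq_set_chip_and_handler(irq, &bar_chip, handle_fasteoi_irq);
 */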
    724
    725/**
    726 *	handle_fasteoi_nmi - irq handler for NMI interrupt lines
    727 *	@desc:	the interrupt description structure for this irq
    728 *
    729 *	A simple NMI-safe handler, considering the restrictions
    730 *	from request_nmi.
    731 *
    732 *	Only a single callback will be issued to the chip: an ->eoi()
    733 *	call when the interrupt has been serviced. This enables support
    734 *	for modern forms of interrupt handlers, which handle the flow
    735 *	details in hardware, transparently.
    736 */
    737void handle_fasteoi_nmi(struct irq_desc *desc)
    738{
    739	struct irq_chip *chip = irq_desc_get_chip(desc);
    740	struct irqaction *action = desc->action;
    741	unsigned int irq = irq_desc_get_irq(desc);
    742	irqreturn_t res;
    743
    744	__kstat_incr_irqs_this_cpu(desc);
    745
    746	trace_irq_handler_entry(irq, action);
    747	/*
    748	 * NMIs cannot be shared, there is only one action.
    749	 */
    750	res = action->handler(irq, action->dev_id);
    751	trace_irq_handler_exit(irq, action, res);
    752
    753	if (chip->irq_eoi)
    754		chip->irq_eoi(&desc->irq_data);
    755}
    756EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
    757
    758/**
    759 *	handle_edge_irq - edge type IRQ handler
    760 *	@desc:	the interrupt description structure for this irq
    761 *
     762 *	The interrupt occurs on the falling and/or rising edge of a hardware
    763 *	signal. The occurrence is latched into the irq controller hardware
    764 *	and must be acked in order to be reenabled. After the ack another
    765 *	interrupt can happen on the same source even before the first one
    766 *	is handled by the associated event handler. If this happens it
    767 *	might be necessary to disable (mask) the interrupt depending on the
     768 *	controller hardware. This requires re-enabling the interrupt inside
     769 *	the loop which handles the interrupts that have arrived while
    770 *	the handler was running. If all pending interrupts are handled, the
    771 *	loop is left.
    772 */
    773void handle_edge_irq(struct irq_desc *desc)
    774{
    775	raw_spin_lock(&desc->lock);
    776
    777	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    778
    779	if (!irq_may_run(desc)) {
    780		desc->istate |= IRQS_PENDING;
    781		mask_ack_irq(desc);
    782		goto out_unlock;
    783	}
    784
    785	/*
     786	 * If it's disabled or no action is available then mask it and get
    787	 * out of here.
    788	 */
    789	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
    790		desc->istate |= IRQS_PENDING;
    791		mask_ack_irq(desc);
    792		goto out_unlock;
    793	}
    794
    795	kstat_incr_irqs_this_cpu(desc);
    796
    797	/* Start handling the irq */
    798	desc->irq_data.chip->irq_ack(&desc->irq_data);
    799
    800	do {
    801		if (unlikely(!desc->action)) {
    802			mask_irq(desc);
    803			goto out_unlock;
    804		}
    805
    806		/*
    807		 * When another irq arrived while we were handling
    808		 * one, we could have masked the irq.
     809		 * Re-enable it if it was not disabled in the meantime.
    810		 */
    811		if (unlikely(desc->istate & IRQS_PENDING)) {
    812			if (!irqd_irq_disabled(&desc->irq_data) &&
    813			    irqd_irq_masked(&desc->irq_data))
    814				unmask_irq(desc);
    815		}
    816
    817		handle_irq_event(desc);
    818
    819	} while ((desc->istate & IRQS_PENDING) &&
    820		 !irqd_irq_disabled(&desc->irq_data));
    821
    822out_unlock:
    823	raw_spin_unlock(&desc->lock);
    824}
    825EXPORT_SYMBOL(handle_edge_irq);
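
/*
 * Illustrative sketch (not part of upstream chip.c): an irq_domain .map()
 * callback that installs handle_edge_irq() for every line of a hypothetical
 * edge-latching controller. qux_edge_chip is an assumption; it must provide
 * .irq_ack (which handle_edge_irq() calls unconditionally) as well as
 * .irq_mask/.irq_unmask.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>

extern struct irq_chip qux_edge_chip;	/* assumed, see note above */

static int qux_domain_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &qux_edge_chip, handle_edge_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

static const struct irq_domain_ops qux_domain_ops = {
	.map	= qux_domain_map,
	.xlate	= irq_domain_xlate_onetwocell,
};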
    826
    827#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
    828/**
    829 *	handle_edge_eoi_irq - edge eoi type IRQ handler
    830 *	@desc:	the interrupt description structure for this irq
    831 *
     832 * Similar to handle_edge_irq above, but using eoi and without the
    833 * mask/unmask logic.
    834 */
    835void handle_edge_eoi_irq(struct irq_desc *desc)
    836{
    837	struct irq_chip *chip = irq_desc_get_chip(desc);
    838
    839	raw_spin_lock(&desc->lock);
    840
    841	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
    842
    843	if (!irq_may_run(desc)) {
    844		desc->istate |= IRQS_PENDING;
    845		goto out_eoi;
    846	}
    847
    848	/*
     849	 * If it's disabled or no action is available then mark it pending
     850	 * and get out of here.
    851	 */
    852	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
    853		desc->istate |= IRQS_PENDING;
    854		goto out_eoi;
    855	}
    856
    857	kstat_incr_irqs_this_cpu(desc);
    858
    859	do {
    860		if (unlikely(!desc->action))
    861			goto out_eoi;
    862
    863		handle_irq_event(desc);
    864
    865	} while ((desc->istate & IRQS_PENDING) &&
    866		 !irqd_irq_disabled(&desc->irq_data));
    867
    868out_eoi:
    869	chip->irq_eoi(&desc->irq_data);
    870	raw_spin_unlock(&desc->lock);
    871}
    872#endif
    873
    874/**
    875 *	handle_percpu_irq - Per CPU local irq handler
    876 *	@desc:	the interrupt description structure for this irq
    877 *
    878 *	Per CPU interrupts on SMP machines without locking requirements
    879 */
    880void handle_percpu_irq(struct irq_desc *desc)
    881{
    882	struct irq_chip *chip = irq_desc_get_chip(desc);
    883
    884	/*
    885	 * PER CPU interrupts are not serialized. Do not touch
    886	 * desc->tot_count.
    887	 */
    888	__kstat_incr_irqs_this_cpu(desc);
    889
    890	if (chip->irq_ack)
    891		chip->irq_ack(&desc->irq_data);
    892
    893	handle_irq_event_percpu(desc);
    894
    895	if (chip->irq_eoi)
    896		chip->irq_eoi(&desc->irq_data);
    897}
    898
    899/**
    900 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
    901 * @desc:	the interrupt description structure for this irq
    902 *
    903 * Per CPU interrupts on SMP machines without locking requirements. Same as
    904 * handle_percpu_irq() above but with the following extras:
    905 *
    906 * action->percpu_dev_id is a pointer to percpu variables which
    907 * contain the real device id for the cpu on which this handler is
    908 * called
    909 */
    910void handle_percpu_devid_irq(struct irq_desc *desc)
    911{
    912	struct irq_chip *chip = irq_desc_get_chip(desc);
    913	struct irqaction *action = desc->action;
    914	unsigned int irq = irq_desc_get_irq(desc);
    915	irqreturn_t res;
    916
    917	/*
    918	 * PER CPU interrupts are not serialized. Do not touch
    919	 * desc->tot_count.
    920	 */
    921	__kstat_incr_irqs_this_cpu(desc);
    922
    923	if (chip->irq_ack)
    924		chip->irq_ack(&desc->irq_data);
    925
    926	if (likely(action)) {
    927		trace_irq_handler_entry(irq, action);
    928		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
    929		trace_irq_handler_exit(irq, action, res);
    930	} else {
    931		unsigned int cpu = smp_processor_id();
    932		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
    933
    934		if (enabled)
    935			irq_percpu_disable(desc, cpu);
    936
    937		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
    938			    enabled ? " and unmasked" : "", irq, cpu);
    939	}
    940
    941	if (chip->irq_eoi)
    942		chip->irq_eoi(&desc->irq_data);
    943}
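
/*
 * Illustrative sketch (not part of upstream chip.c): requesting and enabling
 * a per-CPU interrupt whose flow handler is handle_percpu_devid_irq(), e.g. a
 * timer- or PMU-style line. The foo_* names are assumptions for illustration.
 */
#include <linux/interrupt.h>
#include <linux/percpu.h>

struct foo_cpu_state {
	int counter;
};

static struct foo_cpu_state __percpu *foo_state;

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
	/* dev_id is already the per-CPU pointer for the CPU that took the irq */
	struct foo_cpu_state *state = dev_id;

	state->counter++;
	return IRQ_HANDLED;
}

static int foo_percpu_init(unsigned int irq)
{
	int ret;

	foo_state = alloc_percpu(struct foo_cpu_state);
	if (!foo_state)
		return -ENOMEM;

	ret = request_percpu_irq(irq, foo_percpu_handler, "foo-percpu",
				 foo_state);
	if (ret) {
		free_percpu(foo_state);
		return ret;
	}

	/* Each CPU enables its own copy, typically from a hotplug callback */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}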
    944
    945/**
    946 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
    947 *				     dev ids
    948 * @desc:	the interrupt description structure for this irq
    949 *
    950 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
    951 * as a percpu pointer.
    952 */
    953void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
    954{
    955	struct irq_chip *chip = irq_desc_get_chip(desc);
    956	struct irqaction *action = desc->action;
    957	unsigned int irq = irq_desc_get_irq(desc);
    958	irqreturn_t res;
    959
    960	__kstat_incr_irqs_this_cpu(desc);
    961
    962	trace_irq_handler_entry(irq, action);
    963	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
    964	trace_irq_handler_exit(irq, action, res);
    965
    966	if (chip->irq_eoi)
    967		chip->irq_eoi(&desc->irq_data);
    968}
    969
    970static void
    971__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
    972		     int is_chained, const char *name)
    973{
    974	if (!handle) {
    975		handle = handle_bad_irq;
    976	} else {
    977		struct irq_data *irq_data = &desc->irq_data;
    978#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
    979		/*
    980		 * With hierarchical domains we might run into a
    981		 * situation where the outermost chip is not yet set
    982		 * up, but the inner chips are there.  Instead of
    983		 * bailing we install the handler, but obviously we
    984		 * cannot enable/startup the interrupt at this point.
    985		 */
    986		while (irq_data) {
    987			if (irq_data->chip != &no_irq_chip)
    988				break;
    989			/*
    990			 * Bail out if the outer chip is not set up
     991			 * and the interrupt is supposed to be started
    992			 * right away.
    993			 */
    994			if (WARN_ON(is_chained))
    995				return;
    996			/* Try the parent */
    997			irq_data = irq_data->parent_data;
    998		}
    999#endif
   1000		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
   1001			return;
   1002	}
   1003
   1004	/* Uninstall? */
   1005	if (handle == handle_bad_irq) {
   1006		if (desc->irq_data.chip != &no_irq_chip)
   1007			mask_ack_irq(desc);
   1008		irq_state_set_disabled(desc);
   1009		if (is_chained) {
   1010			desc->action = NULL;
   1011			WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
   1012		}
   1013		desc->depth = 1;
   1014	}
   1015	desc->handle_irq = handle;
   1016	desc->name = name;
   1017
   1018	if (handle != handle_bad_irq && is_chained) {
   1019		unsigned int type = irqd_get_trigger_type(&desc->irq_data);
   1020
   1021		/*
   1022		 * We're about to start this interrupt immediately,
   1023		 * hence the need to set the trigger configuration.
   1024		 * But the .set_type callback may have overridden the
   1025		 * flow handler, ignoring that we're dealing with a
   1026		 * chained interrupt. Reset it immediately because we
   1027		 * do know better.
   1028		 */
   1029		if (type != IRQ_TYPE_NONE) {
   1030			__irq_set_trigger(desc, type);
   1031			desc->handle_irq = handle;
   1032		}
   1033
   1034		irq_settings_set_noprobe(desc);
   1035		irq_settings_set_norequest(desc);
   1036		irq_settings_set_nothread(desc);
   1037		desc->action = &chained_action;
   1038		WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
   1039		irq_activate_and_startup(desc, IRQ_RESEND);
   1040	}
   1041}
   1042
   1043void
   1044__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
   1045		  const char *name)
   1046{
   1047	unsigned long flags;
   1048	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
   1049
   1050	if (!desc)
   1051		return;
   1052
   1053	__irq_do_set_handler(desc, handle, is_chained, name);
   1054	irq_put_desc_busunlock(desc, flags);
   1055}
   1056EXPORT_SYMBOL_GPL(__irq_set_handler);
   1057
   1058void
   1059irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
   1060				 void *data)
   1061{
   1062	unsigned long flags;
   1063	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
   1064
   1065	if (!desc)
   1066		return;
   1067
   1068	desc->irq_common_data.handler_data = data;
   1069	__irq_do_set_handler(desc, handle, 1, NULL);
   1070
   1071	irq_put_desc_busunlock(desc, flags);
   1072}
   1073EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
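
/*
 * Illustrative sketch (not part of upstream chip.c): a chained flow handler
 * for a secondary controller cascaded off one parent line, as installed with
 * irq_set_chained_handler_and_data(). The baz_* names, the pending register
 * at offset 0x0 and the 32-line width are assumptions for illustration only.
 */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

struct baz_intc {
	void __iomem *base;		/* assumed: pending register at 0x0 */
	struct irq_domain *domain;
};

static void baz_chained_handler(struct irq_desc *desc)
{
	struct baz_intc *intc = irq_desc_get_handler_data(desc);
	struct irq_chip *parent_chip = irq_desc_get_chip(desc);
	unsigned long pending;
	int hwirq;

	/* Keep the parent line quiet (mask/ack/eoi as its chip requires) */
	chained_irq_enter(parent_chip, desc);

	pending = readl_relaxed(intc->base + 0x0);
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_irq(irq_find_mapping(intc->domain, hwirq));

	chained_irq_exit(parent_chip, desc);
}

/*
 * In probe, after creating intc->domain:
 *	irq_set_chained_handler_and_data(parent_irq, baz_chained_handler, intc);
 */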
   1074
   1075void
   1076irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
   1077			      irq_flow_handler_t handle, const char *name)
   1078{
   1079	irq_set_chip(irq, chip);
   1080	__irq_set_handler(irq, handle, 0, name);
   1081}
   1082EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
   1083
   1084void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
   1085{
   1086	unsigned long flags, trigger, tmp;
   1087	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
   1088
   1089	if (!desc)
   1090		return;
   1091
   1092	/*
   1093	 * Warn when a driver sets the no autoenable flag on an already
   1094	 * active interrupt.
   1095	 */
   1096	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
   1097
   1098	irq_settings_clr_and_set(desc, clr, set);
   1099
   1100	trigger = irqd_get_trigger_type(&desc->irq_data);
   1101
   1102	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
   1103		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
   1104	if (irq_settings_has_no_balance_set(desc))
   1105		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
   1106	if (irq_settings_is_per_cpu(desc))
   1107		irqd_set(&desc->irq_data, IRQD_PER_CPU);
   1108	if (irq_settings_can_move_pcntxt(desc))
   1109		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
   1110	if (irq_settings_is_level(desc))
   1111		irqd_set(&desc->irq_data, IRQD_LEVEL);
   1112
   1113	tmp = irq_settings_get_trigger_mask(desc);
   1114	if (tmp != IRQ_TYPE_NONE)
   1115		trigger = tmp;
   1116
   1117	irqd_set(&desc->irq_data, trigger);
   1118
   1119	irq_put_desc_unlock(desc, flags);
   1120}
   1121EXPORT_SYMBOL_GPL(irq_modify_status);
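
/*
 * Illustrative sketch (not part of upstream chip.c): using the status flags
 * changed via irq_modify_status() (through the irq_set_status_flags() helper)
 * to keep a line disabled across request_irq(), so the driver can enable it
 * only once the device is fully initialised. The foo_ name is an assumption.
 */
#include <linux/irq.h>
#include <linux/interrupt.h>

static int foo_request_quiet_irq(unsigned int irq, irq_handler_t handler,
				 void *dev)
{
	int ret;

	/* Don't autoenable on request_irq(); also keep it out of autoprobing */
	irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_NOPROBE);

	ret = request_irq(irq, handler, 0, "foo", dev);
	if (ret)
		return ret;

	/* ... program the device ... then: */
	enable_irq(irq);
	return 0;
}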
   1122
   1123#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
   1124/**
   1125 *	irq_cpu_online - Invoke all irq_cpu_online functions.
   1126 *
   1127 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
   1128 *	for each.
   1129 */
   1130void irq_cpu_online(void)
   1131{
   1132	struct irq_desc *desc;
   1133	struct irq_chip *chip;
   1134	unsigned long flags;
   1135	unsigned int irq;
   1136
   1137	for_each_active_irq(irq) {
   1138		desc = irq_to_desc(irq);
   1139		if (!desc)
   1140			continue;
   1141
   1142		raw_spin_lock_irqsave(&desc->lock, flags);
   1143
   1144		chip = irq_data_get_irq_chip(&desc->irq_data);
   1145		if (chip && chip->irq_cpu_online &&
   1146		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
   1147		     !irqd_irq_disabled(&desc->irq_data)))
   1148			chip->irq_cpu_online(&desc->irq_data);
   1149
   1150		raw_spin_unlock_irqrestore(&desc->lock, flags);
   1151	}
   1152}
   1153
   1154/**
   1155 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
   1156 *
   1157 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
   1158 *	for each.
   1159 */
   1160void irq_cpu_offline(void)
   1161{
   1162	struct irq_desc *desc;
   1163	struct irq_chip *chip;
   1164	unsigned long flags;
   1165	unsigned int irq;
   1166
   1167	for_each_active_irq(irq) {
   1168		desc = irq_to_desc(irq);
   1169		if (!desc)
   1170			continue;
   1171
   1172		raw_spin_lock_irqsave(&desc->lock, flags);
   1173
   1174		chip = irq_data_get_irq_chip(&desc->irq_data);
   1175		if (chip && chip->irq_cpu_offline &&
   1176		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
   1177		     !irqd_irq_disabled(&desc->irq_data)))
   1178			chip->irq_cpu_offline(&desc->irq_data);
   1179
   1180		raw_spin_unlock_irqrestore(&desc->lock, flags);
   1181	}
   1182}
   1183#endif
   1184
   1185#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
   1186
   1187#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
   1188/**
   1189 *	handle_fasteoi_ack_irq - irq handler for edge hierarchy
   1190 *	stacked on transparent controllers
   1191 *
   1192 *	@desc:	the interrupt description structure for this irq
   1193 *
   1194 *	Like handle_fasteoi_irq(), but for use with hierarchy where
   1195 *	the irq_chip also needs to have its ->irq_ack() function
   1196 *	called.
   1197 */
   1198void handle_fasteoi_ack_irq(struct irq_desc *desc)
   1199{
   1200	struct irq_chip *chip = desc->irq_data.chip;
   1201
   1202	raw_spin_lock(&desc->lock);
   1203
   1204	if (!irq_may_run(desc))
   1205		goto out;
   1206
   1207	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
   1208
   1209	/*
    1210	 * If it's disabled or no action is available
   1211	 * then mask it and get out of here:
   1212	 */
   1213	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
   1214		desc->istate |= IRQS_PENDING;
   1215		mask_irq(desc);
   1216		goto out;
   1217	}
   1218
   1219	kstat_incr_irqs_this_cpu(desc);
   1220	if (desc->istate & IRQS_ONESHOT)
   1221		mask_irq(desc);
   1222
   1223	/* Start handling the irq */
   1224	desc->irq_data.chip->irq_ack(&desc->irq_data);
   1225
   1226	handle_irq_event(desc);
   1227
   1228	cond_unmask_eoi_irq(desc, chip);
   1229
   1230	raw_spin_unlock(&desc->lock);
   1231	return;
   1232out:
   1233	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
   1234		chip->irq_eoi(&desc->irq_data);
   1235	raw_spin_unlock(&desc->lock);
   1236}
   1237EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
   1238
   1239/**
   1240 *	handle_fasteoi_mask_irq - irq handler for level hierarchy
   1241 *	stacked on transparent controllers
   1242 *
   1243 *	@desc:	the interrupt description structure for this irq
   1244 *
   1245 *	Like handle_fasteoi_irq(), but for use with hierarchy where
   1246 *	the irq_chip also needs to have its ->irq_mask_ack() function
   1247 *	called.
   1248 */
   1249void handle_fasteoi_mask_irq(struct irq_desc *desc)
   1250{
   1251	struct irq_chip *chip = desc->irq_data.chip;
   1252
   1253	raw_spin_lock(&desc->lock);
   1254	mask_ack_irq(desc);
   1255
   1256	if (!irq_may_run(desc))
   1257		goto out;
   1258
   1259	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
   1260
   1261	/*
    1262	 * If it's disabled or no action is available
   1263	 * then mask it and get out of here:
   1264	 */
   1265	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
   1266		desc->istate |= IRQS_PENDING;
   1267		mask_irq(desc);
   1268		goto out;
   1269	}
   1270
   1271	kstat_incr_irqs_this_cpu(desc);
   1272	if (desc->istate & IRQS_ONESHOT)
   1273		mask_irq(desc);
   1274
   1275	handle_irq_event(desc);
   1276
   1277	cond_unmask_eoi_irq(desc, chip);
   1278
   1279	raw_spin_unlock(&desc->lock);
   1280	return;
   1281out:
   1282	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
   1283		chip->irq_eoi(&desc->irq_data);
   1284	raw_spin_unlock(&desc->lock);
   1285}
   1286EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
   1287
   1288#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
   1289
   1290/**
   1291 * irq_chip_set_parent_state - set the state of a parent interrupt.
   1292 *
   1293 * @data: Pointer to interrupt specific data
   1294 * @which: State to be restored (one of IRQCHIP_STATE_*)
   1295 * @val: Value corresponding to @which
   1296 *
    1297 * Returns success (0) if the underlying irqchip does not implement it.
   1298 */
   1299int irq_chip_set_parent_state(struct irq_data *data,
   1300			      enum irqchip_irq_state which,
   1301			      bool val)
   1302{
   1303	data = data->parent_data;
   1304
   1305	if (!data || !data->chip->irq_set_irqchip_state)
   1306		return 0;
   1307
   1308	return data->chip->irq_set_irqchip_state(data, which, val);
   1309}
   1310EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);
   1311
   1312/**
   1313 * irq_chip_get_parent_state - get the state of a parent interrupt.
   1314 *
   1315 * @data: Pointer to interrupt specific data
   1316 * @which: one of IRQCHIP_STATE_* the caller wants to know
   1317 * @state: a pointer to a boolean where the state is to be stored
   1318 *
    1319 * Returns success (0) if the underlying irqchip does not implement it.
   1320 */
   1321int irq_chip_get_parent_state(struct irq_data *data,
   1322			      enum irqchip_irq_state which,
   1323			      bool *state)
   1324{
   1325	data = data->parent_data;
   1326
   1327	if (!data || !data->chip->irq_get_irqchip_state)
   1328		return 0;
   1329
   1330	return data->chip->irq_get_irqchip_state(data, which, state);
   1331}
   1332EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);
   1333
   1334/**
   1335 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
   1336 * NULL)
   1337 * @data:	Pointer to interrupt specific data
   1338 */
   1339void irq_chip_enable_parent(struct irq_data *data)
   1340{
   1341	data = data->parent_data;
   1342	if (data->chip->irq_enable)
   1343		data->chip->irq_enable(data);
   1344	else
   1345		data->chip->irq_unmask(data);
   1346}
   1347EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
   1348
   1349/**
   1350 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
   1351 * NULL)
   1352 * @data:	Pointer to interrupt specific data
   1353 */
   1354void irq_chip_disable_parent(struct irq_data *data)
   1355{
   1356	data = data->parent_data;
   1357	if (data->chip->irq_disable)
   1358		data->chip->irq_disable(data);
   1359	else
   1360		data->chip->irq_mask(data);
   1361}
   1362EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
   1363
   1364/**
   1365 * irq_chip_ack_parent - Acknowledge the parent interrupt
   1366 * @data:	Pointer to interrupt specific data
   1367 */
   1368void irq_chip_ack_parent(struct irq_data *data)
   1369{
   1370	data = data->parent_data;
   1371	data->chip->irq_ack(data);
   1372}
   1373EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
   1374
   1375/**
   1376 * irq_chip_mask_parent - Mask the parent interrupt
   1377 * @data:	Pointer to interrupt specific data
   1378 */
   1379void irq_chip_mask_parent(struct irq_data *data)
   1380{
   1381	data = data->parent_data;
   1382	data->chip->irq_mask(data);
   1383}
   1384EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
   1385
   1386/**
   1387 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
   1388 * @data:	Pointer to interrupt specific data
   1389 */
   1390void irq_chip_mask_ack_parent(struct irq_data *data)
   1391{
   1392	data = data->parent_data;
   1393	data->chip->irq_mask_ack(data);
   1394}
   1395EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);
   1396
   1397/**
   1398 * irq_chip_unmask_parent - Unmask the parent interrupt
   1399 * @data:	Pointer to interrupt specific data
   1400 */
   1401void irq_chip_unmask_parent(struct irq_data *data)
   1402{
   1403	data = data->parent_data;
   1404	data->chip->irq_unmask(data);
   1405}
   1406EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
   1407
   1408/**
   1409 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
   1410 * @data:	Pointer to interrupt specific data
   1411 */
   1412void irq_chip_eoi_parent(struct irq_data *data)
   1413{
   1414	data = data->parent_data;
   1415	data->chip->irq_eoi(data);
   1416}
   1417EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
   1418
   1419/**
   1420 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
   1421 * @data:	Pointer to interrupt specific data
   1422 * @dest:	The affinity mask to set
   1423 * @force:	Flag to enforce setting (disable online checks)
   1424 *
   1425 * Conditional, as the underlying parent chip might not implement it.
   1426 */
   1427int irq_chip_set_affinity_parent(struct irq_data *data,
   1428				 const struct cpumask *dest, bool force)
   1429{
   1430	data = data->parent_data;
   1431	if (data->chip->irq_set_affinity)
   1432		return data->chip->irq_set_affinity(data, dest, force);
   1433
   1434	return -ENOSYS;
   1435}
   1436EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
   1437
   1438/**
   1439 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
   1440 * @data:	Pointer to interrupt specific data
   1441 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
   1442 *
   1443 * Conditional, as the underlying parent chip might not implement it.
   1444 */
   1445int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
   1446{
   1447	data = data->parent_data;
   1448
   1449	if (data->chip->irq_set_type)
   1450		return data->chip->irq_set_type(data, type);
   1451
   1452	return -ENOSYS;
   1453}
   1454EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
   1455
   1456/**
   1457 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
   1458 * @data:	Pointer to interrupt specific data
   1459 *
   1460 * Iterate through the domain hierarchy of the interrupt and check
   1461 * whether a hw retrigger function exists. If yes, invoke it.
   1462 */
   1463int irq_chip_retrigger_hierarchy(struct irq_data *data)
   1464{
   1465	for (data = data->parent_data; data; data = data->parent_data)
   1466		if (data->chip && data->chip->irq_retrigger)
   1467			return data->chip->irq_retrigger(data);
   1468
   1469	return 0;
   1470}
   1471EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);
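
/*
 * Illustrative sketch (not part of upstream chip.c): a hierarchical irq_chip
 * that adds no control of its own and simply forwards every operation to the
 * parent chip via the irq_chip_*_parent() helpers above, a common pattern for
 * stacked domains (e.g. a wakeup or PM shim). The name is an assumption.
 */
#include <linux/irq.h>

static struct irq_chip foo_shim_chip = {
	.name			= "foo-shim",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
};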
   1472
   1473/**
   1474 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
   1475 * @data:	Pointer to interrupt specific data
   1476 * @vcpu_info:	The vcpu affinity information
   1477 */
   1478int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
   1479{
   1480	data = data->parent_data;
   1481	if (data->chip->irq_set_vcpu_affinity)
   1482		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
   1483
   1484	return -ENOSYS;
   1485}
   1486EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);
   1487/**
   1488 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
   1489 * @data:	Pointer to interrupt specific data
   1490 * @on:		Whether to set or reset the wake-up capability of this irq
   1491 *
   1492 * Conditional, as the underlying parent chip might not implement it.
   1493 */
   1494int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
   1495{
   1496	data = data->parent_data;
   1497
   1498	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
   1499		return 0;
   1500
   1501	if (data->chip->irq_set_wake)
   1502		return data->chip->irq_set_wake(data, on);
   1503
   1504	return -ENOSYS;
   1505}
   1506EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);
   1507
   1508/**
   1509 * irq_chip_request_resources_parent - Request resources on the parent interrupt
   1510 * @data:	Pointer to interrupt specific data
   1511 */
   1512int irq_chip_request_resources_parent(struct irq_data *data)
   1513{
   1514	data = data->parent_data;
   1515
   1516	if (data->chip->irq_request_resources)
   1517		return data->chip->irq_request_resources(data);
   1518
   1519	return -ENOSYS;
   1520}
   1521EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
   1522
   1523/**
   1524 * irq_chip_release_resources_parent - Release resources on the parent interrupt
   1525 * @data:	Pointer to interrupt specific data
   1526 */
   1527void irq_chip_release_resources_parent(struct irq_data *data)
   1528{
   1529	data = data->parent_data;
   1530	if (data->chip->irq_release_resources)
   1531		data->chip->irq_release_resources(data);
   1532}
   1533EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
   1534#endif
   1535
   1536/**
    1537 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
   1538 * @data:	Pointer to interrupt specific data
   1539 * @msg:	Pointer to the MSI message
   1540 *
   1541 * For hierarchical domains we find the first chip in the hierarchy
    1542 * which implements the irq_compose_msi_msg callback. For non-hierarchical
    1543 * domains we use the top level chip.
   1544 */
   1545int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
   1546{
   1547	struct irq_data *pos;
   1548
   1549	for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
   1550		if (data->chip && data->chip->irq_compose_msi_msg)
   1551			pos = data;
   1552	}
   1553
   1554	if (!pos)
   1555		return -ENOSYS;
   1556
   1557	pos->chip->irq_compose_msi_msg(pos, msg);
   1558	return 0;
   1559}
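
/*
 * Illustrative sketch (not part of upstream chip.c): the shape of an
 * irq_compose_msi_msg() callback that irq_chip_compose_msi_msg() finds when
 * walking the hierarchy. The doorbell address and the encoding of the data
 * word are assumptions for illustration only.
 */
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/msi.h>

#define FOO_DOORBELL_ADDR	0xa0000000UL	/* assumed doorbell address */

static void foo_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	msg->address_hi = upper_32_bits(FOO_DOORBELL_ADDR);
	msg->address_lo = lower_32_bits(FOO_DOORBELL_ADDR);
	/* Assumed: the device writes the hw irq number as the MSI data word */
	msg->data = irqd_to_hwirq(d);
}

/*
 * Hooked up as .irq_compose_msi_msg = foo_compose_msi_msg in the irq_chip of
 * the domain that owns the doorbell.
 */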
   1560
   1561static struct device *irq_get_parent_device(struct irq_data *data)
   1562{
   1563	if (data->domain)
   1564		return data->domain->dev;
   1565
   1566	return NULL;
   1567}
   1568
   1569/**
   1570 * irq_chip_pm_get - Enable power for an IRQ chip
   1571 * @data:	Pointer to interrupt specific data
   1572 *
   1573 * Enable the power to the IRQ chip referenced by the interrupt data
   1574 * structure.
   1575 */
   1576int irq_chip_pm_get(struct irq_data *data)
   1577{
   1578	struct device *dev = irq_get_parent_device(data);
   1579	int retval = 0;
   1580
   1581	if (IS_ENABLED(CONFIG_PM) && dev)
   1582		retval = pm_runtime_resume_and_get(dev);
   1583
   1584	return retval;
   1585}
   1586
   1587/**
   1588 * irq_chip_pm_put - Disable power for an IRQ chip
   1589 * @data:	Pointer to interrupt specific data
   1590 *
   1591 * Disable the power to the IRQ chip referenced by the interrupt data
    1592 * structure. Note that power will only be disabled once this
   1593 * function has been called for all IRQs that have called irq_chip_pm_get().
   1594 */
   1595int irq_chip_pm_put(struct irq_data *data)
   1596{
   1597	struct device *dev = irq_get_parent_device(data);
   1598	int retval = 0;
   1599
   1600	if (IS_ENABLED(CONFIG_PM) && dev)
   1601		retval = pm_runtime_put(dev);
   1602
   1603	return (retval < 0) ? retval : 0;
   1604}