cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

amdgpu_irq.c (20116B)


      1/*
      2 * Copyright 2008 Advanced Micro Devices, Inc.
      3 * Copyright 2008 Red Hat Inc.
      4 * Copyright 2009 Jerome Glisse.
      5 *
      6 * Permission is hereby granted, free of charge, to any person obtaining a
      7 * copy of this software and associated documentation files (the "Software"),
      8 * to deal in the Software without restriction, including without limitation
      9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10 * and/or sell copies of the Software, and to permit persons to whom the
     11 * Software is furnished to do so, subject to the following conditions:
     12 *
     13 * The above copyright notice and this permission notice shall be included in
     14 * all copies or substantial portions of the Software.
     15 *
     16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22 * OTHER DEALINGS IN THE SOFTWARE.
     23 *
     24 * Authors: Dave Airlie
     25 *          Alex Deucher
     26 *          Jerome Glisse
     27 */
     28
     29/**
     30 * DOC: Interrupt Handling
     31 *
     32 * Interrupts generated within GPU hardware raise interrupt requests that are
     33 * passed to amdgpu IRQ handler which is responsible for detecting source and
     34 * type of the interrupt and dispatching matching handlers. If handling an
     35 * interrupt requires calling kernel functions that may sleep processing is
     36 * dispatched to work handlers.
     37 *
     38 * If MSI functionality is not disabled by module parameter then MSI
     39 * support will be enabled.
     40 *
     41 * For GPU interrupt sources that may be driven by another driver, IRQ domain
     42 * support is used (with mapping between virtual and hardware IRQs).
     43 */
     44
     45#include <linux/irq.h>
     46#include <linux/pci.h>
     47
     48#include <drm/drm_crtc_helper.h>
     49#include <drm/drm_vblank.h>
     50#include <drm/amdgpu_drm.h>
     51#include <drm/drm_drv.h>
     52#include "amdgpu.h"
     53#include "amdgpu_ih.h"
     54#include "atom.h"
     55#include "amdgpu_connectors.h"
     56#include "amdgpu_trace.h"
     57#include "amdgpu_amdkfd.h"
     58#include "amdgpu_ras.h"
     59
     60#include <linux/pm_runtime.h>
     61
     62#ifdef CONFIG_DRM_AMD_DC
     63#include "amdgpu_dm_irq.h"
     64#endif
     65
     66#define AMDGPU_WAIT_IDLE_TIMEOUT 200
     67
/*
 * Human-readable names for SOC15 IH client ids, indexed by client id.
 * Used for interrupt tracing/debug output. Several slots carry two names
 * because the same client id maps to different hardware blocks depending
 * on the ASIC (e.g. "SDMA2 or ACP").
 */
const char *soc15_ih_clientid_name[] = {
	"IH",
	"SDMA2 or ACP",
	"ATHUB",
	"BIF",
	"SDMA3 or DCE",
	"SDMA4 or ISP",
	"VMC1 or PCIE0",
	"RLC",
	"SDMA0",
	"SDMA1",
	"SE0SH",
	"SE1SH",
	"SE2SH",
	"SE3SH",
	"VCN1 or UVD1",
	"THM",
	"VCN or UVD",
	"SDMA5 or VCE0",
	"VMC",
	"SDMA6 or XDMA",
	"GRBM_CP",
	"ATS",
	"ROM_SMUIO",
	"DF",
	"SDMA7 or VCE1",
	"PWR",
	"reserved",
	"UTCL2",
	"EA",
	"UTCL2LOG",
	"MP0",
	"MP1"
};
    102
    103/**
    104 * amdgpu_hotplug_work_func - work handler for display hotplug event
    105 *
    106 * @work: work struct pointer
    107 *
    108 * This is the hotplug event work handler (all ASICs).
    109 * The work gets scheduled from the IRQ handler if there
    110 * was a hotplug interrupt.  It walks through the connector table
    111 * and calls hotplug handler for each connector. After this, it sends
    112 * a DRM hotplug event to alert userspace.
    113 *
    114 * This design approach is required in order to defer hotplug event handling
    115 * from the IRQ handler to a work handler because hotplug handler has to use
    116 * mutexes which cannot be locked in an IRQ handler (since &mutex_lock may
    117 * sleep).
    118 */
    119static void amdgpu_hotplug_work_func(struct work_struct *work)
    120{
    121	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
    122						  hotplug_work);
    123	struct drm_device *dev = adev_to_drm(adev);
    124	struct drm_mode_config *mode_config = &dev->mode_config;
    125	struct drm_connector *connector;
    126	struct drm_connector_list_iter iter;
    127
    128	mutex_lock(&mode_config->mutex);
    129	drm_connector_list_iter_begin(dev, &iter);
    130	drm_for_each_connector_iter(connector, &iter)
    131		amdgpu_connector_hotplug(connector);
    132	drm_connector_list_iter_end(&iter);
    133	mutex_unlock(&mode_config->mutex);
    134	/* Just fire off a uevent and let userspace tell us what to do */
    135	drm_helper_hpd_irq_event(dev);
    136}
    137
    138/**
    139 * amdgpu_irq_disable_all - disable *all* interrupts
    140 *
    141 * @adev: amdgpu device pointer
    142 *
    143 * Disable all types of interrupts from all sources.
    144 */
    145void amdgpu_irq_disable_all(struct amdgpu_device *adev)
    146{
    147	unsigned long irqflags;
    148	unsigned i, j, k;
    149	int r;
    150
    151	spin_lock_irqsave(&adev->irq.lock, irqflags);
    152	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
    153		if (!adev->irq.client[i].sources)
    154			continue;
    155
    156		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
    157			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
    158
    159			if (!src || !src->funcs->set || !src->num_types)
    160				continue;
    161
    162			for (k = 0; k < src->num_types; ++k) {
    163				atomic_set(&src->enabled_types[k], 0);
    164				r = src->funcs->set(adev, src, k,
    165						    AMDGPU_IRQ_STATE_DISABLE);
    166				if (r)
    167					DRM_ERROR("error disabling interrupt (%d)\n",
    168						  r);
    169			}
    170		}
    171	}
    172	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
    173}
    174
    175/**
    176 * amdgpu_irq_handler - IRQ handler
    177 *
    178 * @irq: IRQ number (unused)
    179 * @arg: pointer to DRM device
    180 *
    181 * IRQ handler for amdgpu driver (all ASICs).
    182 *
    183 * Returns:
    184 * result of handling the IRQ, as defined by &irqreturn_t
    185 */
    186static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
    187{
    188	struct drm_device *dev = (struct drm_device *) arg;
    189	struct amdgpu_device *adev = drm_to_adev(dev);
    190	irqreturn_t ret;
    191
    192	ret = amdgpu_ih_process(adev, &adev->irq.ih);
    193	if (ret == IRQ_HANDLED)
    194		pm_runtime_mark_last_busy(dev->dev);
    195
    196	amdgpu_ras_interrupt_fatal_error_handler(adev);
    197
    198	return ret;
    199}
    200
    201/**
    202 * amdgpu_irq_handle_ih1 - kick of processing for IH1
    203 *
    204 * @work: work structure in struct amdgpu_irq
    205 *
    206 * Kick of processing IH ring 1.
    207 */
    208static void amdgpu_irq_handle_ih1(struct work_struct *work)
    209{
    210	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
    211						  irq.ih1_work);
    212
    213	amdgpu_ih_process(adev, &adev->irq.ih1);
    214}
    215
    216/**
    217 * amdgpu_irq_handle_ih2 - kick of processing for IH2
    218 *
    219 * @work: work structure in struct amdgpu_irq
    220 *
    221 * Kick of processing IH ring 2.
    222 */
    223static void amdgpu_irq_handle_ih2(struct work_struct *work)
    224{
    225	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
    226						  irq.ih2_work);
    227
    228	amdgpu_ih_process(adev, &adev->irq.ih2);
    229}
    230
    231/**
    232 * amdgpu_irq_handle_ih_soft - kick of processing for ih_soft
    233 *
    234 * @work: work structure in struct amdgpu_irq
    235 *
    236 * Kick of processing IH soft ring.
    237 */
    238static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
    239{
    240	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
    241						  irq.ih_soft_work);
    242
    243	amdgpu_ih_process(adev, &adev->irq.ih_soft);
    244}
    245
    246/**
    247 * amdgpu_msi_ok - check whether MSI functionality is enabled
    248 *
    249 * @adev: amdgpu device pointer (unused)
    250 *
    251 * Checks whether MSI functionality has been disabled via module parameter
    252 * (all ASICs).
    253 *
    254 * Returns:
    255 * *true* if MSIs are allowed to be enabled or *false* otherwise
    256 */
    257static bool amdgpu_msi_ok(struct amdgpu_device *adev)
    258{
    259	if (amdgpu_msi == 1)
    260		return true;
    261	else if (amdgpu_msi == 0)
    262		return false;
    263
    264	return true;
    265}
    266
/*
 * amdgpu_restore_msix - re-latch MSI-X enable in PCI config space
 *
 * If MSI-X is currently enabled, toggle the enable bit off and back on.
 * Called from amdgpu_irq_gpu_reset_resume_helper() for SR-IOV VFs and
 * passthrough devices, where a function level reset (VF FLR) can leave
 * the device's MSI-X state stale — TODO confirm exact HW rationale.
 */
static void amdgpu_restore_msix(struct amdgpu_device *adev)
{
	u16 ctrl;

	/* Nothing to do if MSI-X is not currently enabled. */
	pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
		return;

	/* VF FLR */
	/* Disable then re-enable; the two writes must stay in this order. */
	ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
	ctrl |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}
    281
/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up work functions for hotplug and reset interrupts, enables MSI
 * functionality, initializes vblank, hotplug and reset interrupt handling.
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
	int r = 0;
	unsigned int irq;

	spin_lock_init(&adev->irq.lock);

	/* Enable MSI if not disabled by module parameter */
	adev->irq.msi_enabled = false;

	if (amdgpu_msi_ok(adev)) {
		int nvec = pci_msix_vec_count(adev->pdev);
		unsigned int flags;

		/* Only request MSI-X if the device actually exposes MSI-X
		 * vectors; otherwise fall back to plain MSI. */
		if (nvec <= 0) {
			flags = PCI_IRQ_MSI;
		} else {
			flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
		}
		/* we only need one vector */
		nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
		if (nvec > 0) {
			adev->irq.msi_enabled = true;
			dev_dbg(adev->dev, "using MSI/MSI-X.\n");
		}
		/* Allocation failure is non-fatal: pci_irq_vector() below
		 * then resolves to the legacy INTx line. */
	}

	if (!amdgpu_device_has_dc_support(adev)) {
		if (!adev->enable_virtual_display)
			/* Disable vblank IRQs aggressively for power-saving */
			/* XXX: can this be enabled for DC? */
			adev_to_drm(adev)->vblank_disable_immediate = true;

		r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
		if (r)
			return r;

		/* Pre-DCE11 */
		INIT_WORK(&adev->hotplug_work,
				amdgpu_hotplug_work_func);
	}

	INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
	INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
	INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);

	/* Use vector 0 for MSI-X. */
	r = pci_irq_vector(adev->pdev, 0);
	if (r < 0)
		return r;
	irq = r;

	/* PCI devices require shared interrupts. */
	r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
			adev_to_drm(adev));
	if (r) {
		/* hotplug_work was initialized above (non-DC path); make
		 * sure it is idle before bailing out. */
		if (!amdgpu_device_has_dc_support(adev))
			flush_work(&adev->hotplug_work);
		return r;
	}
	adev->irq.installed = true;
	adev->irq.irq = irq;
	adev_to_drm(adev)->max_vblank_count = 0x00ffffff;

	DRM_DEBUG("amdgpu: irq initialized.\n");
	return 0;
}
    360
    361
    362void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
    363{
    364	if (adev->irq.installed) {
    365		free_irq(adev->irq.irq, adev_to_drm(adev));
    366		adev->irq.installed = false;
    367		if (adev->irq.msi_enabled)
    368			pci_free_irq_vectors(adev->pdev);
    369
    370		if (!amdgpu_device_has_dc_support(adev))
    371			flush_work(&adev->hotplug_work);
    372	}
    373
    374	amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
    375	amdgpu_ih_ring_fini(adev, &adev->irq.ih);
    376	amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
    377	amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
    378}
    379
    380/**
    381 * amdgpu_irq_fini_sw - shut down interrupt handling
    382 *
    383 * @adev: amdgpu device pointer
    384 *
    385 * Tears down work functions for hotplug and reset interrupts, disables MSI
    386 * functionality, shuts down vblank, hotplug and reset interrupt handling,
    387 * turns off interrupts from all sources (all ASICs).
    388 */
    389void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
    390{
    391	unsigned i, j;
    392
    393	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
    394		if (!adev->irq.client[i].sources)
    395			continue;
    396
    397		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
    398			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
    399
    400			if (!src)
    401				continue;
    402
    403			kfree(src->enabled_types);
    404			src->enabled_types = NULL;
    405		}
    406		kfree(adev->irq.client[i].sources);
    407		adev->irq.client[i].sources = NULL;
    408	}
    409}
    410
    411/**
    412 * amdgpu_irq_add_id - register IRQ source
    413 *
    414 * @adev: amdgpu device pointer
    415 * @client_id: client id
    416 * @src_id: source id
    417 * @source: IRQ source pointer
    418 *
    419 * Registers IRQ source on a client.
    420 *
    421 * Returns:
    422 * 0 on success or error code otherwise
    423 */
    424int amdgpu_irq_add_id(struct amdgpu_device *adev,
    425		      unsigned client_id, unsigned src_id,
    426		      struct amdgpu_irq_src *source)
    427{
    428	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
    429		return -EINVAL;
    430
    431	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
    432		return -EINVAL;
    433
    434	if (!source->funcs)
    435		return -EINVAL;
    436
    437	if (!adev->irq.client[client_id].sources) {
    438		adev->irq.client[client_id].sources =
    439			kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
    440				sizeof(struct amdgpu_irq_src *),
    441				GFP_KERNEL);
    442		if (!adev->irq.client[client_id].sources)
    443			return -ENOMEM;
    444	}
    445
    446	if (adev->irq.client[client_id].sources[src_id] != NULL)
    447		return -EINVAL;
    448
    449	if (source->num_types && !source->enabled_types) {
    450		atomic_t *types;
    451
    452		types = kcalloc(source->num_types, sizeof(atomic_t),
    453				GFP_KERNEL);
    454		if (!types)
    455			return -ENOMEM;
    456
    457		source->enabled_types = types;
    458	}
    459
    460	adev->irq.client[client_id].sources[src_id] = source;
    461	return 0;
    462}
    463
/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Decodes the IV entry at the ring's current read pointer and routes it,
 * in priority order, to: the IRQ domain (legacy client ids with a virtual
 * IRQ mapping), the registered source's ->process callback, or — if no one
 * handled it — amdkfd. Finally advances the ring's processed timestamp.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
			 struct amdgpu_ih_ring *ih)
{
	/* rptr is in bytes; ring entries are addressed as 32-bit words */
	u32 ring_index = ih->rptr >> 2;
	struct amdgpu_iv_entry entry;
	unsigned client_id, src_id;
	struct amdgpu_irq_src *src;
	bool handled = false;
	int r;

	entry.ih = ih;
	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
	amdgpu_ih_decode_iv(adev, &entry);

	/* Pointer difference identifies which IH ring (0/1/2) this is. */
	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

	client_id = entry.client_id;
	src_id = entry.src_id;

	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

	} else	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

	} else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
		   adev->irq.virq[src_id]) {
		/* Source owned by another driver via the IRQ domain. */
		generic_handle_domain_irq(adev->irq.domain, src_id);

	} else if (!adev->irq.client[client_id].sources) {
		DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
			  client_id, src_id);

	} else if ((src = adev->irq.client[client_id].sources[src_id])) {
		/* ->process: <0 error, 0 not consumed, >0 handled */
		r = src->funcs->process(adev, src, &entry);
		if (r < 0)
			DRM_ERROR("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;

	} else {
		DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
	}

	/* Send it to amdkfd as well if it isn't already handled */
	if (!handled)
		amdgpu_amdkfd_interrupt(adev, entry.iv_entry);

	/* Only move the watermark forward, never backwards. */
	if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
		ih->processed_timestamp = entry.timestamp;
}
    523
/**
 * amdgpu_irq_delegate - delegate IV to soft IH ring
 *
 * @adev: amdgpu device pointer
 * @entry: IV entry
 * @num_dw: size of IV in dwords
 *
 * Delegate the IV to the soft IH ring and schedule processing of it. Used
 * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
 */
void amdgpu_irq_delegate(struct amdgpu_device *adev,
			 struct amdgpu_iv_entry *entry,
			 unsigned int num_dw)
{
	/* Copy the raw entry first, then kick the worker that drains it
	 * (amdgpu_irq_handle_ih_soft). */
	amdgpu_ih_ring_write(&adev->irq.ih_soft, entry->iv_entry, num_dw);
	schedule_work(&adev->irq.ih_soft_work);
}
    541
    542/**
    543 * amdgpu_irq_update - update hardware interrupt state
    544 *
    545 * @adev: amdgpu device pointer
    546 * @src: interrupt source pointer
    547 * @type: type of interrupt
    548 *
    549 * Updates interrupt state for the specific source (all ASICs).
    550 */
    551int amdgpu_irq_update(struct amdgpu_device *adev,
    552			     struct amdgpu_irq_src *src, unsigned type)
    553{
    554	unsigned long irqflags;
    555	enum amdgpu_interrupt_state state;
    556	int r;
    557
    558	spin_lock_irqsave(&adev->irq.lock, irqflags);
    559
    560	/* We need to determine after taking the lock, otherwise
    561	   we might disable just enabled interrupts again */
    562	if (amdgpu_irq_enabled(adev, src, type))
    563		state = AMDGPU_IRQ_STATE_ENABLE;
    564	else
    565		state = AMDGPU_IRQ_STATE_DISABLE;
    566
    567	r = src->funcs->set(adev, src, type, state);
    568	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
    569	return r;
    570}
    571
    572/**
    573 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
    574 *
    575 * @adev: amdgpu device pointer
    576 *
    577 * Updates state of all types of interrupts on all sources on resume after
    578 * reset.
    579 */
    580void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
    581{
    582	int i, j, k;
    583
    584	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
    585		amdgpu_restore_msix(adev);
    586
    587	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
    588		if (!adev->irq.client[i].sources)
    589			continue;
    590
    591		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
    592			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
    593
    594			if (!src || !src->funcs || !src->funcs->set)
    595				continue;
    596			for (k = 0; k < src->num_types; k++)
    597				amdgpu_irq_update(adev, src, k);
    598		}
    599	}
    600}
    601
    602/**
    603 * amdgpu_irq_get - enable interrupt
    604 *
    605 * @adev: amdgpu device pointer
    606 * @src: interrupt source pointer
    607 * @type: type of interrupt
    608 *
    609 * Enables specified type of interrupt on the specified source (all ASICs).
    610 *
    611 * Returns:
    612 * 0 on success or error code otherwise
    613 */
    614int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
    615		   unsigned type)
    616{
    617	if (!adev->irq.installed)
    618		return -ENOENT;
    619
    620	if (type >= src->num_types)
    621		return -EINVAL;
    622
    623	if (!src->enabled_types || !src->funcs->set)
    624		return -EINVAL;
    625
    626	if (atomic_inc_return(&src->enabled_types[type]) == 1)
    627		return amdgpu_irq_update(adev, src, type);
    628
    629	return 0;
    630}
    631
    632/**
    633 * amdgpu_irq_put - disable interrupt
    634 *
    635 * @adev: amdgpu device pointer
    636 * @src: interrupt source pointer
    637 * @type: type of interrupt
    638 *
    639 * Enables specified type of interrupt on the specified source (all ASICs).
    640 *
    641 * Returns:
    642 * 0 on success or error code otherwise
    643 */
    644int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
    645		   unsigned type)
    646{
    647	if (!adev->irq.installed)
    648		return -ENOENT;
    649
    650	if (type >= src->num_types)
    651		return -EINVAL;
    652
    653	if (!src->enabled_types || !src->funcs->set)
    654		return -EINVAL;
    655
    656	if (atomic_dec_and_test(&src->enabled_types[type]))
    657		return amdgpu_irq_update(adev, src, type);
    658
    659	return 0;
    660}
    661
    662/**
    663 * amdgpu_irq_enabled - check whether interrupt is enabled or not
    664 *
    665 * @adev: amdgpu device pointer
    666 * @src: interrupt source pointer
    667 * @type: type of interrupt
    668 *
    669 * Checks whether the given type of interrupt is enabled on the given source.
    670 *
    671 * Returns:
    672 * *true* if interrupt is enabled, *false* if interrupt is disabled or on
    673 * invalid parameters
    674 */
    675bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
    676			unsigned type)
    677{
    678	if (!adev->irq.installed)
    679		return false;
    680
    681	if (type >= src->num_types)
    682		return false;
    683
    684	if (!src->enabled_types || !src->funcs->set)
    685		return false;
    686
    687	return !!atomic_read(&src->enabled_types[type]);
    688}
    689
/* XXX: Generic IRQ handling */
/* Intentionally empty mask hook for the generic IRQ chip below; the GPU
 * sources are controlled through amdgpu_irq_update() instead. */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
	/* XXX */
}
    695
/* Intentionally empty unmask hook for the generic IRQ chip below. */
static void amdgpu_irq_unmask(struct irq_data *irqd)
{
	/* XXX */
}
    700
/* amdgpu hardware interrupt chip descriptor */
/* Minimal irq_chip used for domain-mapped GPU sources; mask/unmask are
 * no-ops (see amdgpu_irq_mask/amdgpu_irq_unmask above). */
static struct irq_chip amdgpu_irq_chip = {
	.name = "amdgpu-ih",
	.irq_mask = amdgpu_irq_mask,
	.irq_unmask = amdgpu_irq_unmask,
};
    707
    708/**
    709 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
    710 *
    711 * @d: amdgpu IRQ domain pointer (unused)
    712 * @irq: virtual IRQ number
    713 * @hwirq: hardware irq number
    714 *
    715 * Current implementation assigns simple interrupt handler to the given virtual
    716 * IRQ.
    717 *
    718 * Returns:
    719 * 0 on success or error code otherwise
    720 */
    721static int amdgpu_irqdomain_map(struct irq_domain *d,
    722				unsigned int irq, irq_hw_number_t hwirq)
    723{
    724	if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
    725		return -EPERM;
    726
    727	irq_set_chip_and_handler(irq,
    728				 &amdgpu_irq_chip, handle_simple_irq);
    729	return 0;
    730}
    731
/* Implementation of methods for amdgpu IRQ domain */
/* Only .map is needed; unmapping uses the generic default. */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
	.map = amdgpu_irqdomain_map,
};
    736
    737/**
    738 * amdgpu_irq_add_domain - create a linear IRQ domain
    739 *
    740 * @adev: amdgpu device pointer
    741 *
    742 * Creates an IRQ domain for GPU interrupt sources
    743 * that may be driven by another driver (e.g., ACP).
    744 *
    745 * Returns:
    746 * 0 on success or error code otherwise
    747 */
    748int amdgpu_irq_add_domain(struct amdgpu_device *adev)
    749{
    750	adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
    751						 &amdgpu_hw_irqdomain_ops, adev);
    752	if (!adev->irq.domain) {
    753		DRM_ERROR("GPU irq add domain failed\n");
    754		return -ENODEV;
    755	}
    756
    757	return 0;
    758}
    759
    760/**
    761 * amdgpu_irq_remove_domain - remove the IRQ domain
    762 *
    763 * @adev: amdgpu device pointer
    764 *
    765 * Removes the IRQ domain for GPU interrupt sources
    766 * that may be driven by another driver (e.g., ACP).
    767 */
    768void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
    769{
    770	if (adev->irq.domain) {
    771		irq_domain_remove(adev->irq.domain);
    772		adev->irq.domain = NULL;
    773	}
    774}
    775
    776/**
    777 * amdgpu_irq_create_mapping - create mapping between domain Linux IRQs
    778 *
    779 * @adev: amdgpu device pointer
    780 * @src_id: IH source id
    781 *
    782 * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ
    783 * Use this for components that generate a GPU interrupt, but are driven
    784 * by a different driver (e.g., ACP).
    785 *
    786 * Returns:
    787 * Linux IRQ
    788 */
    789unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
    790{
    791	adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
    792
    793	return adev->irq.virq[src_id];
    794}