cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_uncore.c (80444B)


      1/*
      2 * Copyright © 2013 Intel Corporation
      3 *
      4 * Permission is hereby granted, free of charge, to any person obtaining a
      5 * copy of this software and associated documentation files (the "Software"),
      6 * to deal in the Software without restriction, including without limitation
      7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8 * and/or sell copies of the Software, and to permit persons to whom the
      9 * Software is furnished to do so, subject to the following conditions:
     10 *
     11 * The above copyright notice and this permission notice (including the next
     12 * paragraph) shall be included in all copies or substantial portions of the
     13 * Software.
     14 *
     15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     21 * IN THE SOFTWARE.
     22 */
     23
     24#include <linux/pm_runtime.h>
     25
     26#include "gt/intel_engine_regs.h"
     27#include "gt/intel_gt_regs.h"
     28
     29#include "i915_drv.h"
     30#include "i915_iosf_mbi.h"
     31#include "i915_trace.h"
     32#include "i915_vgpu.h"
     33#include "intel_pm.h"
     34
     35#define FORCEWAKE_ACK_TIMEOUT_MS 50
     36#define GT_FIFO_TIMEOUT_MS	 10
     37
     38#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
     39
     40static void
     41fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
     42{
     43	uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
     44}
     45
     46void
     47intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
     48{
     49	spin_lock_init(&mmio_debug->lock);
     50	mmio_debug->unclaimed_mmio_check = 1;
     51}
     52
     53static void mmio_debug_suspend(struct intel_uncore_mmio_debug *mmio_debug)
     54{
     55	lockdep_assert_held(&mmio_debug->lock);
     56
     57	/* Save and disable mmio debugging for the user bypass */
     58	if (!mmio_debug->suspend_count++) {
     59		mmio_debug->saved_mmio_check = mmio_debug->unclaimed_mmio_check;
     60		mmio_debug->unclaimed_mmio_check = 0;
     61	}
     62}
     63
     64static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
     65{
     66	lockdep_assert_held(&mmio_debug->lock);
     67
     68	if (!--mmio_debug->suspend_count)
     69		mmio_debug->unclaimed_mmio_check = mmio_debug->saved_mmio_check;
     70}
     71
     72static const char * const forcewake_domain_names[] = {
     73	"render",
     74	"gt",
     75	"media",
     76	"vdbox0",
     77	"vdbox1",
     78	"vdbox2",
     79	"vdbox3",
     80	"vdbox4",
     81	"vdbox5",
     82	"vdbox6",
     83	"vdbox7",
     84	"vebox0",
     85	"vebox1",
     86	"vebox2",
     87	"vebox3",
     88};
     89
     90const char *
     91intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
     92{
     93	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
     94
     95	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
     96		return forcewake_domain_names[id];
     97
     98	WARN_ON(id);
     99
    100	return "unknown";
    101}
    102
    103#define fw_ack(d) readl((d)->reg_ack)
    104#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
    105#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
    106
    107static inline void
    108fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
    109{
    110	/*
    111	 * We don't really know if the powerwell for the forcewake domain we are
    112	 * trying to reset here does exist at this point (engines could be fused
    113	 * off in ICL+), so no waiting for acks
    114	 */
    115	/* WaRsClearFWBitsAtReset:bdw,skl */
    116	fw_clear(d, 0xffff);
    117}
    118
    119static inline void
    120fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
    121{
    122	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
    123	d->uncore->fw_domains_timer |= d->mask;
    124	d->wake_count++;
    125	hrtimer_start_range_ns(&d->timer,
    126			       NSEC_PER_MSEC,
    127			       NSEC_PER_MSEC,
    128			       HRTIMER_MODE_REL);
    129}
    130
    131static inline int
    132__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
    133	       const u32 ack,
    134	       const u32 value)
    135{
    136	return wait_for_atomic((fw_ack(d) & ack) == value,
    137			       FORCEWAKE_ACK_TIMEOUT_MS);
    138}
    139
    140static inline int
    141wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
    142	       const u32 ack)
    143{
    144	return __wait_for_ack(d, ack, 0);
    145}
    146
    147static inline int
    148wait_ack_set(const struct intel_uncore_forcewake_domain *d,
    149	     const u32 ack)
    150{
    151	return __wait_for_ack(d, ack, ack);
    152}
    153
    154static inline void
    155fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
    156{
    157	if (wait_ack_clear(d, FORCEWAKE_KERNEL)) {
    158		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
    159			  intel_uncore_forcewake_domain_to_str(d->id));
    160		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
    161	}
    162}
    163
    164enum ack_type {
    165	ACK_CLEAR = 0,
    166	ACK_SET
    167};
    168
    169static int
    170fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
    171				 const enum ack_type type)
    172{
    173	const u32 ack_bit = FORCEWAKE_KERNEL;
    174	const u32 value = type == ACK_SET ? ack_bit : 0;
    175	unsigned int pass;
    176	bool ack_detected;
    177
    178	/*
     179	 * There is a possibility of the driver's wake request colliding
     180	 * with the hardware's own wake requests, which can cause the
     181	 * hardware not to deliver the driver's ack message.
    182	 *
    183	 * Use a fallback bit toggle to kick the gpu state machine
    184	 * in the hope that the original ack will be delivered along with
    185	 * the fallback ack.
    186	 *
    187	 * This workaround is described in HSDES #1604254524 and it's known as:
    188	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
    189	 * although the name is a bit misleading.
    190	 */
    191
    192	pass = 1;
    193	do {
    194		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
    195
    196		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
    197		/* Give gt some time to relax before the polling frenzy */
    198		udelay(10 * pass);
    199		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
    200
    201		ack_detected = (fw_ack(d) & ack_bit) == value;
    202
    203		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
    204	} while (!ack_detected && pass++ < 10);
    205
    206	DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
    207			 intel_uncore_forcewake_domain_to_str(d->id),
    208			 type == ACK_SET ? "set" : "clear",
    209			 fw_ack(d),
    210			 pass);
    211
    212	return ack_detected ? 0 : -ETIMEDOUT;
    213}
    214
    215static inline void
    216fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
    217{
    218	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
    219		return;
    220
    221	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
    222		fw_domain_wait_ack_clear(d);
    223}
    224
    225static inline void
    226fw_domain_get(const struct intel_uncore_forcewake_domain *d)
    227{
    228	fw_set(d, FORCEWAKE_KERNEL);
    229}
    230
    231static inline void
    232fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
    233{
    234	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
    235		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
    236			  intel_uncore_forcewake_domain_to_str(d->id));
    237		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
    238	}
    239}
    240
    241static inline void
    242fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
    243{
    244	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
    245		return;
    246
    247	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
    248		fw_domain_wait_ack_set(d);
    249}
    250
    251static inline void
    252fw_domain_put(const struct intel_uncore_forcewake_domain *d)
    253{
    254	fw_clear(d, FORCEWAKE_KERNEL);
    255}
    256
    257static void
    258fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
    259{
    260	struct intel_uncore_forcewake_domain *d;
    261	unsigned int tmp;
    262
    263	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
    264
    265	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
    266		fw_domain_wait_ack_clear(d);
    267		fw_domain_get(d);
    268	}
    269
    270	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
    271		fw_domain_wait_ack_set(d);
    272
    273	uncore->fw_domains_active |= fw_domains;
    274}
    275
    276static void
    277fw_domains_get_with_fallback(struct intel_uncore *uncore,
    278			     enum forcewake_domains fw_domains)
    279{
    280	struct intel_uncore_forcewake_domain *d;
    281	unsigned int tmp;
    282
    283	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
    284
    285	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
    286		fw_domain_wait_ack_clear_fallback(d);
    287		fw_domain_get(d);
    288	}
    289
    290	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
    291		fw_domain_wait_ack_set_fallback(d);
    292
    293	uncore->fw_domains_active |= fw_domains;
    294}
    295
    296static void
    297fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
    298{
    299	struct intel_uncore_forcewake_domain *d;
    300	unsigned int tmp;
    301
    302	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
    303
    304	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
    305		fw_domain_put(d);
    306
    307	uncore->fw_domains_active &= ~fw_domains;
    308}
    309
    310static void
    311fw_domains_reset(struct intel_uncore *uncore,
    312		 enum forcewake_domains fw_domains)
    313{
    314	struct intel_uncore_forcewake_domain *d;
    315	unsigned int tmp;
    316
    317	if (!fw_domains)
    318		return;
    319
    320	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
    321
    322	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
    323		fw_domain_reset(d);
    324}
    325
    326static inline u32 gt_thread_status(struct intel_uncore *uncore)
    327{
    328	u32 val;
    329
    330	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
    331	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
    332
    333	return val;
    334}
    335
    336static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
    337{
    338	/*
    339	 * w/a for a sporadic read returning 0 by waiting for the GT
    340	 * thread to wake up.
    341	 */
    342	drm_WARN_ONCE(&uncore->i915->drm,
    343		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
    344		      "GT thread status wait timed out\n");
    345}
    346
    347static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
    348					      enum forcewake_domains fw_domains)
    349{
    350	fw_domains_get_normal(uncore, fw_domains);
    351
    352	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
    353	__gen6_gt_wait_for_thread_c0(uncore);
    354}
    355
    356static inline u32 fifo_free_entries(struct intel_uncore *uncore)
    357{
    358	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
    359
    360	return count & GT_FIFO_FREE_ENTRIES_MASK;
    361}
    362
    363static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
    364{
    365	u32 n;
    366
     367	/* On VLV, the FIFO is shared by both SW and HW,
     368	 * so we need to read the FREE_ENTRIES every time. */
    369	if (IS_VALLEYVIEW(uncore->i915))
    370		n = fifo_free_entries(uncore);
    371	else
    372		n = uncore->fifo_count;
    373
    374	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
    375		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
    376				    GT_FIFO_NUM_RESERVED_ENTRIES,
    377				    GT_FIFO_TIMEOUT_MS)) {
    378			drm_dbg(&uncore->i915->drm,
    379				"GT_FIFO timeout, entries: %u\n", n);
    380			return;
    381		}
    382	}
    383
    384	uncore->fifo_count = n - 1;
    385}
    386
    387static enum hrtimer_restart
    388intel_uncore_fw_release_timer(struct hrtimer *timer)
    389{
    390	struct intel_uncore_forcewake_domain *domain =
    391	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
    392	struct intel_uncore *uncore = domain->uncore;
    393	unsigned long irqflags;
    394
    395	assert_rpm_device_not_suspended(uncore->rpm);
    396
    397	if (xchg(&domain->active, false))
    398		return HRTIMER_RESTART;
    399
    400	spin_lock_irqsave(&uncore->lock, irqflags);
    401
    402	uncore->fw_domains_timer &= ~domain->mask;
    403
    404	GEM_BUG_ON(!domain->wake_count);
    405	if (--domain->wake_count == 0)
    406		fw_domains_put(uncore, domain->mask);
    407
    408	spin_unlock_irqrestore(&uncore->lock, irqflags);
    409
    410	return HRTIMER_NORESTART;
    411}
    412
     413/* Note that callers must have acquired the PUNIT->PMIC bus before calling this. */
    414static unsigned int
    415intel_uncore_forcewake_reset(struct intel_uncore *uncore)
    416{
    417	unsigned long irqflags;
    418	struct intel_uncore_forcewake_domain *domain;
    419	int retry_count = 100;
    420	enum forcewake_domains fw, active_domains;
    421
    422	iosf_mbi_assert_punit_acquired();
    423
    424	/* Hold uncore.lock across reset to prevent any register access
    425	 * with forcewake not set correctly. Wait until all pending
    426	 * timers are run before holding.
    427	 */
    428	while (1) {
    429		unsigned int tmp;
    430
    431		active_domains = 0;
    432
    433		for_each_fw_domain(domain, uncore, tmp) {
    434			smp_store_mb(domain->active, false);
    435			if (hrtimer_cancel(&domain->timer) == 0)
    436				continue;
    437
    438			intel_uncore_fw_release_timer(&domain->timer);
    439		}
    440
    441		spin_lock_irqsave(&uncore->lock, irqflags);
    442
    443		for_each_fw_domain(domain, uncore, tmp) {
    444			if (hrtimer_active(&domain->timer))
    445				active_domains |= domain->mask;
    446		}
    447
    448		if (active_domains == 0)
    449			break;
    450
    451		if (--retry_count == 0) {
    452			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
    453			break;
    454		}
    455
    456		spin_unlock_irqrestore(&uncore->lock, irqflags);
    457		cond_resched();
    458	}
    459
    460	drm_WARN_ON(&uncore->i915->drm, active_domains);
    461
    462	fw = uncore->fw_domains_active;
    463	if (fw)
    464		fw_domains_put(uncore, fw);
    465
    466	fw_domains_reset(uncore, uncore->fw_domains);
    467	assert_forcewakes_inactive(uncore);
    468
    469	spin_unlock_irqrestore(&uncore->lock, irqflags);
    470
    471	return fw; /* track the lost user forcewake domains */
    472}
    473
    474static bool
    475fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
    476{
    477	u32 dbg;
    478
    479	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
    480	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
    481		return false;
    482
    483	/*
    484	 * Bugs in PCI programming (or failing hardware) can occasionally cause
    485	 * us to lose access to the MMIO BAR.  When this happens, register
    486	 * reads will come back with 0xFFFFFFFF for every register and things
    487	 * go bad very quickly.  Let's try to detect that special case and at
    488	 * least try to print a more informative message about what has
    489	 * happened.
    490	 *
    491	 * During normal operation the FPGA_DBG register has several unused
    492	 * bits that will always read back as 0's so we can use them as canaries
    493	 * to recognize when MMIO accesses are just busted.
    494	 */
    495	if (unlikely(dbg == ~0))
    496		drm_err(&uncore->i915->drm,
    497			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");
    498
    499	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
    500
    501	return true;
    502}
    503
    504static bool
    505vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
    506{
    507	u32 cer;
    508
    509	cer = __raw_uncore_read32(uncore, CLAIM_ER);
    510	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
    511		return false;
    512
    513	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
    514
    515	return true;
    516}
    517
    518static bool
    519gen6_check_for_fifo_debug(struct intel_uncore *uncore)
    520{
    521	u32 fifodbg;
    522
    523	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
    524
    525	if (unlikely(fifodbg)) {
    526	drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
    527		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
    528	}
    529
    530	return fifodbg;
    531}
    532
    533static bool
    534check_for_unclaimed_mmio(struct intel_uncore *uncore)
    535{
    536	bool ret = false;
    537
    538	lockdep_assert_held(&uncore->debug->lock);
    539
    540	if (uncore->debug->suspend_count)
    541		return false;
    542
    543	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
    544		ret |= fpga_check_for_unclaimed_mmio(uncore);
    545
    546	if (intel_uncore_has_dbg_unclaimed(uncore))
    547		ret |= vlv_check_for_unclaimed_mmio(uncore);
    548
    549	if (intel_uncore_has_fifo(uncore))
    550		ret |= gen6_check_for_fifo_debug(uncore);
    551
    552	return ret;
    553}
    554
    555static void forcewake_early_sanitize(struct intel_uncore *uncore,
    556				     unsigned int restore_forcewake)
    557{
    558	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
    559
    560	/* WaDisableShadowRegForCpd:chv */
    561	if (IS_CHERRYVIEW(uncore->i915)) {
    562		__raw_uncore_write32(uncore, GTFIFOCTL,
    563				     __raw_uncore_read32(uncore, GTFIFOCTL) |
    564				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
    565				     GT_FIFO_CTL_RC6_POLICY_STALL);
    566	}
    567
    568	iosf_mbi_punit_acquire();
    569	intel_uncore_forcewake_reset(uncore);
    570	if (restore_forcewake) {
    571		spin_lock_irq(&uncore->lock);
    572		fw_domains_get(uncore, restore_forcewake);
    573
    574		if (intel_uncore_has_fifo(uncore))
    575			uncore->fifo_count = fifo_free_entries(uncore);
    576		spin_unlock_irq(&uncore->lock);
    577	}
    578	iosf_mbi_punit_release();
    579}
    580
    581void intel_uncore_suspend(struct intel_uncore *uncore)
    582{
    583	if (!intel_uncore_has_forcewake(uncore))
    584		return;
    585
    586	iosf_mbi_punit_acquire();
    587	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
    588		&uncore->pmic_bus_access_nb);
    589	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
    590	iosf_mbi_punit_release();
    591}
    592
    593void intel_uncore_resume_early(struct intel_uncore *uncore)
    594{
    595	unsigned int restore_forcewake;
    596
    597	if (intel_uncore_unclaimed_mmio(uncore))
    598		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
    599
    600	if (!intel_uncore_has_forcewake(uncore))
    601		return;
    602
    603	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
    604	forcewake_early_sanitize(uncore, restore_forcewake);
    605
    606	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
    607}
    608
    609void intel_uncore_runtime_resume(struct intel_uncore *uncore)
    610{
    611	if (!intel_uncore_has_forcewake(uncore))
    612		return;
    613
    614	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
    615}
    616
    617static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
    618					 enum forcewake_domains fw_domains)
    619{
    620	struct intel_uncore_forcewake_domain *domain;
    621	unsigned int tmp;
    622
    623	fw_domains &= uncore->fw_domains;
    624
    625	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
    626		if (domain->wake_count++) {
    627			fw_domains &= ~domain->mask;
    628			domain->active = true;
    629		}
    630	}
    631
    632	if (fw_domains)
    633		fw_domains_get(uncore, fw_domains);
    634}
    635
    636/**
    637 * intel_uncore_forcewake_get - grab forcewake domain references
    638 * @uncore: the intel_uncore structure
    639 * @fw_domains: forcewake domains to get reference on
    640 *
     641 * This function can be used to get the GT's forcewake domain references.
     642 * Normal register access will handle the forcewake domains automatically.
     643 * However, if some sequence requires the GT to not power down particular
     644 * forcewake domains, this function should be called at the beginning of the
     645 * sequence, and the references should subsequently be dropped by a symmetric
     646 * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
     647 * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
    648 */
    649void intel_uncore_forcewake_get(struct intel_uncore *uncore,
    650				enum forcewake_domains fw_domains)
    651{
    652	unsigned long irqflags;
    653
    654	if (!uncore->fw_get_funcs)
    655		return;
    656
    657	assert_rpm_wakelock_held(uncore->rpm);
    658
    659	spin_lock_irqsave(&uncore->lock, irqflags);
    660	__intel_uncore_forcewake_get(uncore, fw_domains);
    661	spin_unlock_irqrestore(&uncore->lock, irqflags);
    662}
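
/*
 * A minimal usage sketch for the explicit get/put pair, assuming the caller
 * already holds a runtime-PM wakeref (the regular uncore read/write helpers
 * are implied, not shown in this excerpt):
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	... a sequence of register accesses that must not see the render
 *	    well power down in between ...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 *
 * Passing FORCEWAKE_ALL keeps every available domain awake for the duration.
 */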
    663
    664/**
    665 * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
    666 * @uncore: the intel_uncore structure
    667 *
    668 * This function is a wrapper around intel_uncore_forcewake_get() to acquire
    669 * the GT powerwell and in the process disable our debugging for the
    670 * duration of userspace's bypass.
    671 */
    672void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
    673{
    674	spin_lock_irq(&uncore->lock);
    675	if (!uncore->user_forcewake_count++) {
    676		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
    677		spin_lock(&uncore->debug->lock);
    678		mmio_debug_suspend(uncore->debug);
    679		spin_unlock(&uncore->debug->lock);
    680	}
    681	spin_unlock_irq(&uncore->lock);
    682}
    683
    684/**
    685 * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
    686 * @uncore: the intel_uncore structure
    687 *
    688 * This function complements intel_uncore_forcewake_user_get() and releases
    689 * the GT powerwell taken on behalf of the userspace bypass.
    690 */
    691void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
    692{
    693	spin_lock_irq(&uncore->lock);
    694	if (!--uncore->user_forcewake_count) {
    695		spin_lock(&uncore->debug->lock);
    696		mmio_debug_resume(uncore->debug);
    697
    698		if (check_for_unclaimed_mmio(uncore))
    699			drm_info(&uncore->i915->drm,
    700				 "Invalid mmio detected during user access\n");
    701		spin_unlock(&uncore->debug->lock);
    702
    703		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
    704	}
    705	spin_unlock_irq(&uncore->lock);
    706}
    707
    708/**
    709 * intel_uncore_forcewake_get__locked - grab forcewake domain references
    710 * @uncore: the intel_uncore structure
    711 * @fw_domains: forcewake domains to get reference on
    712 *
    713 * See intel_uncore_forcewake_get(). This variant places the onus
    714 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
    715 */
    716void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
    717					enum forcewake_domains fw_domains)
    718{
    719	lockdep_assert_held(&uncore->lock);
    720
    721	if (!uncore->fw_get_funcs)
    722		return;
    723
    724	__intel_uncore_forcewake_get(uncore, fw_domains);
    725}
    726
    727static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
    728					 enum forcewake_domains fw_domains,
    729					 bool delayed)
    730{
    731	struct intel_uncore_forcewake_domain *domain;
    732	unsigned int tmp;
    733
    734	fw_domains &= uncore->fw_domains;
    735
    736	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
    737		GEM_BUG_ON(!domain->wake_count);
    738
    739		if (--domain->wake_count) {
    740			domain->active = true;
    741			continue;
    742		}
    743
    744		if (delayed &&
    745		    !(domain->uncore->fw_domains_timer & domain->mask))
    746			fw_domain_arm_timer(domain);
    747		else
    748			fw_domains_put(uncore, domain->mask);
    749	}
    750}
    751
    752/**
    753 * intel_uncore_forcewake_put - release a forcewake domain reference
    754 * @uncore: the intel_uncore structure
     755 * @fw_domains: forcewake domains to put references on
     756 *
     757 * This function drops the device-level forcewake references for the
     758 * specified domains obtained by intel_uncore_forcewake_get().
    759 */
    760void intel_uncore_forcewake_put(struct intel_uncore *uncore,
    761				enum forcewake_domains fw_domains)
    762{
    763	unsigned long irqflags;
    764
    765	if (!uncore->fw_get_funcs)
    766		return;
    767
    768	spin_lock_irqsave(&uncore->lock, irqflags);
    769	__intel_uncore_forcewake_put(uncore, fw_domains, false);
    770	spin_unlock_irqrestore(&uncore->lock, irqflags);
    771}
    772
    773void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
    774					enum forcewake_domains fw_domains)
    775{
    776	unsigned long irqflags;
    777
    778	if (!uncore->fw_get_funcs)
    779		return;
    780
    781	spin_lock_irqsave(&uncore->lock, irqflags);
    782	__intel_uncore_forcewake_put(uncore, fw_domains, true);
    783	spin_unlock_irqrestore(&uncore->lock, irqflags);
    784}
    785
    786/**
    787 * intel_uncore_forcewake_flush - flush the delayed release
    788 * @uncore: the intel_uncore structure
    789 * @fw_domains: forcewake domains to flush
    790 */
    791void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
    792				  enum forcewake_domains fw_domains)
    793{
    794	struct intel_uncore_forcewake_domain *domain;
    795	unsigned int tmp;
    796
    797	if (!uncore->fw_get_funcs)
    798		return;
    799
    800	fw_domains &= uncore->fw_domains;
    801	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
    802		WRITE_ONCE(domain->active, false);
    803		if (hrtimer_cancel(&domain->timer))
    804			intel_uncore_fw_release_timer(&domain->timer);
    805	}
    806}
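
/*
 * A minimal sketch of how the delayed-release path fits together, assuming a
 * single reference on FORCEWAKE_GT: intel_uncore_forcewake_put_delayed()
 * drops the last reference but arms the per-domain hrtimer (see
 * fw_domain_arm_timer()) instead of clearing FORCEWAKE_KERNEL immediately,
 * so back-to-back accesses within the ~1ms window don't redo the ack
 * handshake. A caller that must guarantee the hardware has really been
 * released follows up with intel_uncore_forcewake_flush(), which cancels the
 * pending timer and performs the release synchronously:
 *
 *	intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_GT);
 *	intel_uncore_forcewake_flush(uncore, FORCEWAKE_GT);
 */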
    807
    808/**
     809 * intel_uncore_forcewake_put__locked - release forcewake domain references
     810 * @uncore: the intel_uncore structure
     811 * @fw_domains: forcewake domains to put references on
    812 *
    813 * See intel_uncore_forcewake_put(). This variant places the onus
    814 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
    815 */
    816void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
    817					enum forcewake_domains fw_domains)
    818{
    819	lockdep_assert_held(&uncore->lock);
    820
    821	if (!uncore->fw_get_funcs)
    822		return;
    823
    824	__intel_uncore_forcewake_put(uncore, fw_domains, false);
    825}
    826
    827void assert_forcewakes_inactive(struct intel_uncore *uncore)
    828{
    829	if (!uncore->fw_get_funcs)
    830		return;
    831
    832	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
    833		 "Expected all fw_domains to be inactive, but %08x are still on\n",
    834		 uncore->fw_domains_active);
    835}
    836
    837void assert_forcewakes_active(struct intel_uncore *uncore,
    838			      enum forcewake_domains fw_domains)
    839{
    840	struct intel_uncore_forcewake_domain *domain;
    841	unsigned int tmp;
    842
    843	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
    844		return;
    845
    846	if (!uncore->fw_get_funcs)
    847		return;
    848
    849	spin_lock_irq(&uncore->lock);
    850
    851	assert_rpm_wakelock_held(uncore->rpm);
    852
    853	fw_domains &= uncore->fw_domains;
    854	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
    855		 "Expected %08x fw_domains to be active, but %08x are off\n",
    856		 fw_domains, fw_domains & ~uncore->fw_domains_active);
    857
    858	/*
    859	 * Check that the caller has an explicit wakeref and we don't mistake
    860	 * it for the auto wakeref.
    861	 */
    862	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
    863		unsigned int actual = READ_ONCE(domain->wake_count);
    864		unsigned int expect = 1;
    865
    866		if (uncore->fw_domains_timer & domain->mask)
    867			expect++; /* pending automatic release */
    868
    869		if (drm_WARN(&uncore->i915->drm, actual < expect,
    870			     "Expected domain %d to be held awake by caller, count=%d\n",
    871			     domain->id, actual))
    872			break;
    873	}
    874
    875	spin_unlock_irq(&uncore->lock);
    876}
    877
    878/* We give fast paths for the really cool registers */
    879#define NEEDS_FORCE_WAKE(reg) ({ \
    880	u32 __reg = (reg); \
    881	__reg < 0x40000 || __reg >= GEN11_BSD_RING_BASE; \
    882})
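
/*
 * Worked example for the check above (offsets are illustrative): 0x2030, the
 * ring tail register of the render engine at RENDER_RING_BASE 0x2000, is
 * below 0x40000 and therefore goes through the forcewake lookup, while
 * offsets in 0x40000..GEN11_BSD_RING_BASE-1 (display and other non-GT
 * ranges) skip it. Offsets at or above GEN11_BSD_RING_BASE belong to the
 * gen11+ standalone media engines and take the forcewake path again.
 */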
    883
    884static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
    885{
    886	if (offset < entry->start)
    887		return -1;
    888	else if (offset > entry->end)
    889		return 1;
    890	else
    891		return 0;
    892}
    893
    894/* Copied and "macroized" from lib/bsearch.c */
    895#define BSEARCH(key, base, num, cmp) ({                                 \
    896	unsigned int start__ = 0, end__ = (num);                        \
    897	typeof(base) result__ = NULL;                                   \
    898	while (start__ < end__) {                                       \
    899		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
    900		int ret__ = (cmp)((key), (base) + mid__);               \
    901		if (ret__ < 0) {                                        \
    902			end__ = mid__;                                  \
    903		} else if (ret__ > 0) {                                 \
    904			start__ = mid__ + 1;                            \
    905		} else {                                                \
    906			result__ = (base) + mid__;                      \
    907			break;                                          \
    908		}                                                       \
    909	}                                                               \
    910	result__;                                                       \
    911})
    912
    913static enum forcewake_domains
    914find_fw_domain(struct intel_uncore *uncore, u32 offset)
    915{
    916	const struct intel_forcewake_range *entry;
    917
    918	entry = BSEARCH(offset,
    919			uncore->fw_domains_table,
    920			uncore->fw_domains_table_entries,
    921			fw_range_cmp);
    922
    923	if (!entry)
    924		return 0;
    925
    926	/*
    927	 * The list of FW domains depends on the SKU in gen11+ so we
    928	 * can't determine it statically. We use FORCEWAKE_ALL and
    929	 * translate it here to the list of available domains.
    930	 */
    931	if (entry->domains == FORCEWAKE_ALL)
    932		return uncore->fw_domains;
    933
    934	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
    935		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
    936		 entry->domains & ~uncore->fw_domains, offset);
    937
    938	return entry->domains;
    939}
    940
    941#define GEN_FW_RANGE(s, e, d) \
    942	{ .start = (s), .end = (e), .domains = (d) }
    943
    944/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
    945static const struct intel_forcewake_range __vlv_fw_ranges[] = {
    946	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
    947	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
    948	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
    949	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
    950	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
    951	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
    952	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
    953};
    954
    955#define __fwtable_reg_read_fw_domains(uncore, offset) \
    956({ \
    957	enum forcewake_domains __fwd = 0; \
    958	if (NEEDS_FORCE_WAKE((offset))) \
    959		__fwd = find_fw_domain(uncore, offset); \
    960	__fwd; \
    961})
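
/*
 * Worked example of the read-path lookup above against __vlv_fw_ranges: a
 * read at offset 0xb100 passes NEEDS_FORCE_WAKE() (it is below 0x40000),
 * find_fw_domain() bsearches the table and lands in the 0xb000-0x11fff
 * entry, so the access is done under FORCEWAKE_RENDER. An offset such as
 * 0x14000 falls in a gap between entries, BSEARCH() returns NULL and the
 * access needs no forcewake domain at all.
 */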
    962
    963/* *Must* be sorted by offset! See intel_shadow_table_check(). */
    964static const struct i915_range gen8_shadowed_regs[] = {
    965	{ .start =  0x2030, .end =  0x2030 },
    966	{ .start =  0xA008, .end =  0xA00C },
    967	{ .start = 0x12030, .end = 0x12030 },
    968	{ .start = 0x1a030, .end = 0x1a030 },
    969	{ .start = 0x22030, .end = 0x22030 },
    970	/* TODO: Other registers are not yet used */
    971};
    972
    973static const struct i915_range gen11_shadowed_regs[] = {
    974	{ .start =   0x2030, .end =   0x2030 },
    975	{ .start =   0x2550, .end =   0x2550 },
    976	{ .start =   0xA008, .end =   0xA00C },
    977	{ .start =  0x22030, .end =  0x22030 },
    978	{ .start =  0x22230, .end =  0x22230 },
    979	{ .start =  0x22510, .end =  0x22550 },
    980	{ .start = 0x1C0030, .end = 0x1C0030 },
    981	{ .start = 0x1C0230, .end = 0x1C0230 },
    982	{ .start = 0x1C0510, .end = 0x1C0550 },
    983	{ .start = 0x1C4030, .end = 0x1C4030 },
    984	{ .start = 0x1C4230, .end = 0x1C4230 },
    985	{ .start = 0x1C4510, .end = 0x1C4550 },
    986	{ .start = 0x1C8030, .end = 0x1C8030 },
    987	{ .start = 0x1C8230, .end = 0x1C8230 },
    988	{ .start = 0x1C8510, .end = 0x1C8550 },
    989	{ .start = 0x1D0030, .end = 0x1D0030 },
    990	{ .start = 0x1D0230, .end = 0x1D0230 },
    991	{ .start = 0x1D0510, .end = 0x1D0550 },
    992	{ .start = 0x1D4030, .end = 0x1D4030 },
    993	{ .start = 0x1D4230, .end = 0x1D4230 },
    994	{ .start = 0x1D4510, .end = 0x1D4550 },
    995	{ .start = 0x1D8030, .end = 0x1D8030 },
    996	{ .start = 0x1D8230, .end = 0x1D8230 },
    997	{ .start = 0x1D8510, .end = 0x1D8550 },
    998};
    999
   1000static const struct i915_range gen12_shadowed_regs[] = {
   1001	{ .start =   0x2030, .end =   0x2030 },
   1002	{ .start =   0x2510, .end =   0x2550 },
   1003	{ .start =   0xA008, .end =   0xA00C },
   1004	{ .start =   0xA188, .end =   0xA188 },
   1005	{ .start =   0xA278, .end =   0xA278 },
   1006	{ .start =   0xA540, .end =   0xA56C },
   1007	{ .start =   0xC4C8, .end =   0xC4C8 },
   1008	{ .start =   0xC4D4, .end =   0xC4D4 },
   1009	{ .start =   0xC600, .end =   0xC600 },
   1010	{ .start =  0x22030, .end =  0x22030 },
   1011	{ .start =  0x22510, .end =  0x22550 },
   1012	{ .start = 0x1C0030, .end = 0x1C0030 },
   1013	{ .start = 0x1C0510, .end = 0x1C0550 },
   1014	{ .start = 0x1C4030, .end = 0x1C4030 },
   1015	{ .start = 0x1C4510, .end = 0x1C4550 },
   1016	{ .start = 0x1C8030, .end = 0x1C8030 },
   1017	{ .start = 0x1C8510, .end = 0x1C8550 },
   1018	{ .start = 0x1D0030, .end = 0x1D0030 },
   1019	{ .start = 0x1D0510, .end = 0x1D0550 },
   1020	{ .start = 0x1D4030, .end = 0x1D4030 },
   1021	{ .start = 0x1D4510, .end = 0x1D4550 },
   1022	{ .start = 0x1D8030, .end = 0x1D8030 },
   1023	{ .start = 0x1D8510, .end = 0x1D8550 },
   1024
   1025	/*
   1026	 * The rest of these ranges are specific to Xe_HP and beyond, but
   1027	 * are reserved/unused ranges on earlier gen12 platforms, so they can
   1028	 * be safely added to the gen12 table.
   1029	 */
   1030	{ .start = 0x1E0030, .end = 0x1E0030 },
   1031	{ .start = 0x1E0510, .end = 0x1E0550 },
   1032	{ .start = 0x1E4030, .end = 0x1E4030 },
   1033	{ .start = 0x1E4510, .end = 0x1E4550 },
   1034	{ .start = 0x1E8030, .end = 0x1E8030 },
   1035	{ .start = 0x1E8510, .end = 0x1E8550 },
   1036	{ .start = 0x1F0030, .end = 0x1F0030 },
   1037	{ .start = 0x1F0510, .end = 0x1F0550 },
   1038	{ .start = 0x1F4030, .end = 0x1F4030 },
   1039	{ .start = 0x1F4510, .end = 0x1F4550 },
   1040	{ .start = 0x1F8030, .end = 0x1F8030 },
   1041	{ .start = 0x1F8510, .end = 0x1F8550 },
   1042};
   1043
   1044static const struct i915_range dg2_shadowed_regs[] = {
   1045	{ .start =   0x2030, .end =   0x2030 },
   1046	{ .start =   0x2510, .end =   0x2550 },
   1047	{ .start =   0xA008, .end =   0xA00C },
   1048	{ .start =   0xA188, .end =   0xA188 },
   1049	{ .start =   0xA278, .end =   0xA278 },
   1050	{ .start =   0xA540, .end =   0xA56C },
   1051	{ .start =   0xC4C8, .end =   0xC4C8 },
   1052	{ .start =   0xC4E0, .end =   0xC4E0 },
   1053	{ .start =   0xC600, .end =   0xC600 },
   1054	{ .start =   0xC658, .end =   0xC658 },
   1055	{ .start =  0x22030, .end =  0x22030 },
   1056	{ .start =  0x22510, .end =  0x22550 },
   1057	{ .start = 0x1C0030, .end = 0x1C0030 },
   1058	{ .start = 0x1C0510, .end = 0x1C0550 },
   1059	{ .start = 0x1C4030, .end = 0x1C4030 },
   1060	{ .start = 0x1C4510, .end = 0x1C4550 },
   1061	{ .start = 0x1C8030, .end = 0x1C8030 },
   1062	{ .start = 0x1C8510, .end = 0x1C8550 },
   1063	{ .start = 0x1D0030, .end = 0x1D0030 },
   1064	{ .start = 0x1D0510, .end = 0x1D0550 },
   1065	{ .start = 0x1D4030, .end = 0x1D4030 },
   1066	{ .start = 0x1D4510, .end = 0x1D4550 },
   1067	{ .start = 0x1D8030, .end = 0x1D8030 },
   1068	{ .start = 0x1D8510, .end = 0x1D8550 },
   1069	{ .start = 0x1E0030, .end = 0x1E0030 },
   1070	{ .start = 0x1E0510, .end = 0x1E0550 },
   1071	{ .start = 0x1E4030, .end = 0x1E4030 },
   1072	{ .start = 0x1E4510, .end = 0x1E4550 },
   1073	{ .start = 0x1E8030, .end = 0x1E8030 },
   1074	{ .start = 0x1E8510, .end = 0x1E8550 },
   1075	{ .start = 0x1F0030, .end = 0x1F0030 },
   1076	{ .start = 0x1F0510, .end = 0x1F0550 },
   1077	{ .start = 0x1F4030, .end = 0x1F4030 },
   1078	{ .start = 0x1F4510, .end = 0x1F4550 },
   1079	{ .start = 0x1F8030, .end = 0x1F8030 },
   1080	{ .start = 0x1F8510, .end = 0x1F8550 },
   1081};
   1082
   1083static int mmio_range_cmp(u32 key, const struct i915_range *range)
   1084{
   1085	if (key < range->start)
   1086		return -1;
   1087	else if (key > range->end)
   1088		return 1;
   1089	else
   1090		return 0;
   1091}
   1092
   1093static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
   1094{
   1095	if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
   1096		return false;
   1097
   1098	return BSEARCH(offset,
   1099		       uncore->shadowed_reg_table,
   1100		       uncore->shadowed_reg_table_entries,
   1101		       mmio_range_cmp);
   1102}
   1103
   1104static enum forcewake_domains
   1105gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
   1106{
   1107	return FORCEWAKE_RENDER;
   1108}
   1109
   1110static const struct intel_forcewake_range __gen6_fw_ranges[] = {
   1111	GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
   1112};
   1113
   1114/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
   1115static const struct intel_forcewake_range __chv_fw_ranges[] = {
   1116	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
   1117	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
   1118	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
   1119	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
   1120	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
   1121	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
   1122	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
   1123	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
   1124	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
   1125	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
   1126	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
   1127	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
   1128	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
   1129	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
   1130	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
   1131	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
   1132};
   1133
   1134#define __fwtable_reg_write_fw_domains(uncore, offset) \
   1135({ \
   1136	enum forcewake_domains __fwd = 0; \
   1137	const u32 __offset = (offset); \
   1138	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
   1139		__fwd = find_fw_domain(uncore, __offset); \
   1140	__fwd; \
   1141})
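
/*
 * Worked example of the write-path lookup above on a gen12 platform: a write
 * to 0xA188 hits gen12_shadowed_regs, so is_shadowed() returns true and no
 * forcewake is taken (the hardware latches shadowed writes for us), whereas
 * a write to 0x8300 is not shadowed and resolves through find_fw_domain()
 * to FORCEWAKE_RENDER in the gen12 table.
 */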
   1142
   1143/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
   1144static const struct intel_forcewake_range __gen9_fw_ranges[] = {
   1145	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
   1146	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
   1147	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
   1148	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
   1149	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
   1150	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
   1151	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
   1152	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
   1153	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
   1154	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
   1155	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
   1156	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
   1157	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
   1158	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
   1159	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
   1160	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
   1161	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
   1162	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
   1163	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
   1164	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
   1165	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
   1166	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
   1167	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
   1168	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
   1169	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
   1170	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
   1171	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
   1172	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
   1173	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
   1174	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
   1175	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
   1176	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
   1177};
   1178
   1179/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
   1180static const struct intel_forcewake_range __gen11_fw_ranges[] = {
   1181	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
   1182	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
   1183	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
   1184	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
   1185	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
   1186	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
   1187	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
   1188	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
   1189	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
   1190	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
   1191	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
   1192	GEN_FW_RANGE(0x8800, 0x8bff, 0),
   1193	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
   1194	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
   1195	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
   1196	GEN_FW_RANGE(0x9560, 0x95ff, 0),
   1197	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
   1198	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
   1199	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
   1200	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
   1201	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
   1202	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
   1203	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
   1204	GEN_FW_RANGE(0x24000, 0x2407f, 0),
   1205	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
   1206	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
   1207	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
   1208	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
   1209	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
   1210	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
   1211	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
   1212	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
   1213	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
   1214	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
   1215	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
   1216};
   1217
   1218/*
   1219 * *Must* be sorted by offset ranges! See intel_fw_table_check().
   1220 *
   1221 * Note that the spec lists several reserved/unused ranges that don't
   1222 * actually contain any registers.  In the table below we'll combine those
   1223 * reserved ranges with either the preceding or following range to keep the
   1224 * table small and lookups fast.
   1225 */
   1226static const struct intel_forcewake_range __gen12_fw_ranges[] = {
   1227	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
   1228		0x0   -  0xaff: reserved
   1229		0xb00 - 0x1fff: always on */
   1230	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
   1231	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
   1232	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
   1233	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
   1234	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
   1235	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
   1236		0x4000 - 0x48ff: gt
   1237		0x4900 - 0x51ff: reserved */
   1238	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
   1239		0x5200 - 0x53ff: render
   1240		0x5400 - 0x54ff: reserved
   1241		0x5500 - 0x7fff: render */
   1242	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
   1243	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
   1244	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
   1245		0x8160 - 0x817f: reserved
   1246		0x8180 - 0x81ff: always on */
   1247	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
   1248	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
   1249	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
   1250		0x8500 - 0x87ff: gt
   1251		0x8800 - 0x8fff: reserved
   1252		0x9000 - 0x947f: gt
   1253		0x9480 - 0x94cf: reserved */
   1254	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
   1255	GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
   1256		0x9560 - 0x95ff: always on
   1257		0x9600 - 0x97ff: reserved */
   1258	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
   1259	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
   1260	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
   1261		0xb400 - 0xbf7f: gt
   1262		0xb480 - 0xbfff: reserved
   1263		0xc000 - 0xcfff: gt */
   1264	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
   1265	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
   1266	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
   1267	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
   1268		0xdc00 - 0xddff: render
   1269		0xde00 - 0xde7f: reserved
   1270		0xde80 - 0xe8ff: render
   1271		0xe900 - 0xefff: reserved */
   1272	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
   1273		 0xf000 - 0xffff: gt
   1274		0x10000 - 0x147ff: reserved */
   1275	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
   1276		0x14800 - 0x14fff: render
   1277		0x15000 - 0x16dff: reserved
   1278		0x16e00 - 0x1bfff: render
   1279		0x1c000 - 0x1ffff: reserved */
   1280	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
   1281	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
   1282	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
   1283	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
   1284		0x24000 - 0x2407f: always on
   1285		0x24080 - 0x2417f: reserved */
   1286	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
   1287		0x24180 - 0x241ff: gt
   1288		0x24200 - 0x249ff: reserved */
   1289	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
   1290		0x24a00 - 0x24a7f: render
   1291		0x24a80 - 0x251ff: reserved */
   1292	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
   1293		0x25200 - 0x252ff: gt
   1294		0x25300 - 0x255ff: reserved */
   1295	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
   1296	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
   1297		0x25680 - 0x256ff: VD2
   1298		0x25700 - 0x259ff: reserved */
   1299	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
   1300	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
   1301		0x25a80 - 0x25aff: VD2
   1302		0x25b00 - 0x2ffff: reserved */
   1303	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
   1304	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
   1305	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
   1306		0x1c0000 - 0x1c2bff: VD0
   1307		0x1c2c00 - 0x1c2cff: reserved
   1308		0x1c2d00 - 0x1c2dff: VD0
   1309		0x1c2e00 - 0x1c3eff: reserved
   1310		0x1c3f00 - 0x1c3fff: VD0 */
   1311	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
   1312	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
   1313		0x1c8000 - 0x1ca0ff: VE0
   1314		0x1ca100 - 0x1cbeff: reserved
   1315		0x1cbf00 - 0x1cbfff: VE0 */
   1316	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
   1317		0x1cc000 - 0x1ccfff: VD0
   1318		0x1cd000 - 0x1cffff: reserved */
   1319	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
   1320		0x1d0000 - 0x1d2bff: VD2
   1321		0x1d2c00 - 0x1d2cff: reserved
   1322		0x1d2d00 - 0x1d2dff: VD2
   1323		0x1d2e00 - 0x1d3eff: reserved
   1324		0x1d3f00 - 0x1d3fff: VD2 */
   1325};
   1326
   1327/*
   1328 * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
   1329 * switching it from the GT domain to the render domain.
   1330 *
   1331 * *Must* be sorted by offset ranges! See intel_fw_table_check().
   1332 */
   1333#define XEHP_FWRANGES(FW_RANGE_D800)					\
   1334	GEN_FW_RANGE(0x0, 0x1fff, 0), /*					\
   1335		  0x0 -  0xaff: reserved					\
   1336		0xb00 - 0x1fff: always on */					\
   1337	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),				\
   1338	GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),				\
   1339	GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*					\
   1340		0x4b00 - 0x4fff: reserved					\
   1341		0x5000 - 0x51ff: always on */					\
   1342	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),				\
   1343	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),				\
   1344	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),				\
   1345	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*					\
   1346		0x8160 - 0x817f: reserved					\
   1347		0x8180 - 0x81ff: always on */					\
   1348	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),				\
   1349	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),				\
   1350	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*				\
   1351		0x8500 - 0x87ff: gt						\
   1352		0x8800 - 0x8c7f: reserved					\
   1353		0x8c80 - 0x8cff: gt (DG2 only) */				\
   1354	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*			\
   1355		0x8d00 - 0x8dff: render (DG2 only)				\
   1356		0x8e00 - 0x8fff: reserved */					\
   1357	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*				\
   1358		0x9000 - 0x947f: gt						\
   1359		0x9480 - 0x94cf: reserved */					\
   1360	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),				\
   1361	GEN_FW_RANGE(0x9560, 0x967f, 0), /*					\
   1362		0x9560 - 0x95ff: always on					\
   1363		0x9600 - 0x967f: reserved */					\
   1364	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*			\
   1365		0x9680 - 0x96ff: render (DG2 only)				\
   1366		0x9700 - 0x97ff: reserved */					\
   1367	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*				\
   1368		0x9800 - 0xb4ff: gt						\
   1369		0xb500 - 0xbfff: reserved					\
   1370		0xc000 - 0xcfff: gt */						\
   1371	GEN_FW_RANGE(0xd000, 0xd7ff, 0),					\
   1372	GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800),			\
   1373	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),				\
   1374	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),				\
   1375	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*				\
   1376		0xdd00 - 0xddff: gt						\
   1377		0xde00 - 0xde7f: reserved */					\
   1378	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*			\
   1379		0xde80 - 0xdfff: render						\
   1380		0xe000 - 0xe0ff: reserved					\
   1381		0xe100 - 0xe8ff: render */					\
   1382	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*				\
   1383		0xe900 - 0xe9ff: gt						\
   1384		0xea00 - 0xefff: reserved					\
   1385		0xf000 - 0xffff: gt */						\
   1386	GEN_FW_RANGE(0x10000, 0x12fff, 0), /*					\
   1387		0x10000 - 0x11fff: reserved					\
   1388		0x12000 - 0x127ff: always on					\
   1389		0x12800 - 0x12fff: reserved */					\
   1390	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */	\
   1391	GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /*		\
   1392		0x13200 - 0x133ff: VD2 (DG2 only)				\
   1393		0x13400 - 0x13fff: reserved */					\
   1394	GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */	\
   1395	GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */	\
   1396	GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */	\
   1397	GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */	\
   1398	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),			\
   1399	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*			\
   1400		0x15000 - 0x15fff: gt (DG2 only)				\
   1401		0x16000 - 0x16dff: reserved */					\
   1402	GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER),			\
   1403	GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /*		\
   1404		0x20000 - 0x20fff: VD0 (XEHPSDV only)				\
   1405		0x21000 - 0x21fff: reserved */					\
   1406	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),				\
   1407	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*					\
   1408		0x24000 - 0x2407f: always on					\
   1409		0x24080 - 0x2417f: reserved */					\
   1410	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*			\
   1411		0x24180 - 0x241ff: gt						\
   1412		0x24200 - 0x249ff: reserved */					\
   1413	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*			\
   1414		0x24a00 - 0x24a7f: render					\
   1415		0x24a80 - 0x251ff: reserved */					\
   1416	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*			\
   1417		0x25200 - 0x252ff: gt						\
   1418		0x25300 - 0x25fff: reserved */					\
   1419	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*			\
   1420		0x26000 - 0x27fff: render					\
   1421		0x28000 - 0x29fff: reserved					\
   1422		0x2a000 - 0x2ffff: undocumented */				\
   1423	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),				\
   1424	GEN_FW_RANGE(0x40000, 0x1bffff, 0),					\
   1425	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*		\
   1426		0x1c0000 - 0x1c2bff: VD0					\
   1427		0x1c2c00 - 0x1c2cff: reserved					\
   1428		0x1c2d00 - 0x1c2dff: VD0					\
   1429		0x1c2e00 - 0x1c3eff: VD0 (DG2 only)				\
   1430		0x1c3f00 - 0x1c3fff: VD0 */					\
   1431	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*		\
   1432		0x1c4000 - 0x1c6bff: VD1					\
   1433		0x1c6c00 - 0x1c6cff: reserved					\
   1434		0x1c6d00 - 0x1c6dff: VD1					\
   1435		0x1c6e00 - 0x1c7fff: reserved */				\
   1436	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*		\
   1437		0x1c8000 - 0x1ca0ff: VE0					\
   1438		0x1ca100 - 0x1cbfff: reserved */				\
   1439	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),		\
   1440	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),		\
   1441	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),		\
   1442	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),		\
   1443	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*		\
   1444		0x1d0000 - 0x1d2bff: VD2					\
   1445		0x1d2c00 - 0x1d2cff: reserved					\
   1446		0x1d2d00 - 0x1d2dff: VD2					\
   1447		0x1d2e00 - 0x1d3dff: VD2 (DG2 only)				\
   1448		0x1d3e00 - 0x1d3eff: reserved					\
   1449		0x1d3f00 - 0x1d3fff: VD2 */					\
   1450	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*		\
   1451		0x1d4000 - 0x1d6bff: VD3					\
   1452		0x1d6c00 - 0x1d6cff: reserved					\
   1453		0x1d6d00 - 0x1d6dff: VD3					\
   1454		0x1d6e00 - 0x1d7fff: reserved */				\
   1455	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*		\
   1456		0x1d8000 - 0x1da0ff: VE1					\
   1457		0x1da100 - 0x1dffff: reserved */				\
   1458	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*		\
   1459		0x1e0000 - 0x1e2bff: VD4					\
   1460		0x1e2c00 - 0x1e2cff: reserved					\
   1461		0x1e2d00 - 0x1e2dff: VD4					\
   1462		0x1e2e00 - 0x1e3eff: reserved					\
   1463		0x1e3f00 - 0x1e3fff: VD4 */					\
   1464	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*		\
   1465		0x1e4000 - 0x1e6bff: VD5					\
   1466		0x1e6c00 - 0x1e6cff: reserved					\
   1467		0x1e6d00 - 0x1e6dff: VD5					\
   1468		0x1e6e00 - 0x1e7fff: reserved */				\
   1469	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*		\
   1470		0x1e8000 - 0x1ea0ff: VE2					\
   1471		0x1ea100 - 0x1effff: reserved */				\
   1472	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*		\
   1473		0x1f0000 - 0x1f2bff: VD6					\
   1474		0x1f2c00 - 0x1f2cff: reserved					\
   1475		0x1f2d00 - 0x1f2dff: VD6					\
   1476		0x1f2e00 - 0x1f3eff: reserved					\
   1477		0x1f3f00 - 0x1f3fff: VD6 */					\
   1478	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*		\
   1479		0x1f4000 - 0x1f6bff: VD7					\
   1480		0x1f6c00 - 0x1f6cff: reserved					\
   1481		0x1f6d00 - 0x1f6dff: VD7					\
   1482		0x1f6e00 - 0x1f7fff: reserved */				\
   1483	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
   1484
   1485static const struct intel_forcewake_range __xehp_fw_ranges[] = {
   1486	XEHP_FWRANGES(FORCEWAKE_GT)
   1487};
   1488
   1489static const struct intel_forcewake_range __dg2_fw_ranges[] = {
   1490	XEHP_FWRANGES(FORCEWAKE_RENDER)
   1491};
   1492
   1493static void
   1494ilk_dummy_write(struct intel_uncore *uncore)
   1495{
   1496	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
   1497	 * the chip from rc6 before touching it for real. MI_MODE is masked,
   1498	 * hence harmless to write 0 into. */
   1499	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
   1500}
   1501
   1502static void
   1503__unclaimed_reg_debug(struct intel_uncore *uncore,
   1504		      const i915_reg_t reg,
   1505		      const bool read)
   1506{
   1507	if (drm_WARN(&uncore->i915->drm,
   1508		     check_for_unclaimed_mmio(uncore),
   1509		     "Unclaimed %s register 0x%x\n",
   1510		     read ? "read from" : "write to",
   1511		     i915_mmio_reg_offset(reg)))
   1512		/* Only report the first N failures */
   1513		uncore->i915->params.mmio_debug--;
   1514}
   1515
   1516static void
   1517__unclaimed_previous_reg_debug(struct intel_uncore *uncore,
   1518			       const i915_reg_t reg,
   1519			       const bool read)
   1520{
   1521	if (check_for_unclaimed_mmio(uncore))
   1522		drm_dbg(&uncore->i915->drm,
   1523			"Unclaimed access detected before %s register 0x%x\n",
   1524			read ? "read from" : "write to",
   1525			i915_mmio_reg_offset(reg));
   1526}
   1527
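/*
 * Bracketing helper for the MMIO accessors below: called with @before == true
 * right before the access (takes the debug lock and reports any unclaimed
 * access that happened earlier) and with @before == false right after it
 * (reports an unclaimed access caused by this access and drops the lock).
 */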
   1528static inline void
   1529unclaimed_reg_debug(struct intel_uncore *uncore,
   1530		    const i915_reg_t reg,
   1531		    const bool read,
   1532		    const bool before)
   1533{
   1534	if (likely(!uncore->i915->params.mmio_debug))
   1535		return;
   1536
   1537	/* interrupts are disabled and re-enabled around uncore->lock usage */
   1538	lockdep_assert_held(&uncore->lock);
   1539
   1540	if (before) {
   1541		spin_lock(&uncore->debug->lock);
   1542		__unclaimed_previous_reg_debug(uncore, reg, read);
   1543	} else {
   1544		__unclaimed_reg_debug(uncore, reg, read);
   1545		spin_unlock(&uncore->debug->lock);
   1546	}
   1547}
   1548
   1549#define __vgpu_read(x) \
   1550static u##x \
   1551vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
   1552	u##x val = __raw_uncore_read##x(uncore, reg); \
   1553	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
   1554	return val; \
   1555}
   1556__vgpu_read(8)
   1557__vgpu_read(16)
   1558__vgpu_read(32)
   1559__vgpu_read(64)
   1560
   1561#define GEN2_READ_HEADER(x) \
   1562	u##x val = 0; \
   1563	assert_rpm_wakelock_held(uncore->rpm);
   1564
   1565#define GEN2_READ_FOOTER \
   1566	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
   1567	return val
   1568
   1569#define __gen2_read(x) \
   1570static u##x \
   1571gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
   1572	GEN2_READ_HEADER(x); \
   1573	val = __raw_uncore_read##x(uncore, reg); \
   1574	GEN2_READ_FOOTER; \
   1575}
   1576
   1577#define __gen5_read(x) \
   1578static u##x \
   1579gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
   1580	GEN2_READ_HEADER(x); \
   1581	ilk_dummy_write(uncore); \
   1582	val = __raw_uncore_read##x(uncore, reg); \
   1583	GEN2_READ_FOOTER; \
   1584}
   1585
   1586__gen5_read(8)
   1587__gen5_read(16)
   1588__gen5_read(32)
   1589__gen5_read(64)
   1590__gen2_read(8)
   1591__gen2_read(16)
   1592__gen2_read(32)
   1593__gen2_read(64)
   1594
   1595#undef __gen5_read
   1596#undef __gen2_read
   1597
   1598#undef GEN2_READ_FOOTER
   1599#undef GEN2_READ_HEADER
   1600
   1601#define GEN6_READ_HEADER(x) \
   1602	u32 offset = i915_mmio_reg_offset(reg); \
   1603	unsigned long irqflags; \
   1604	u##x val = 0; \
   1605	assert_rpm_wakelock_held(uncore->rpm); \
   1606	spin_lock_irqsave(&uncore->lock, irqflags); \
   1607	unclaimed_reg_debug(uncore, reg, true, true)
   1608
   1609#define GEN6_READ_FOOTER \
   1610	unclaimed_reg_debug(uncore, reg, true, false); \
   1611	spin_unlock_irqrestore(&uncore->lock, irqflags); \
   1612	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
   1613	return val
   1614
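/*
 * The domains grabbed on this auto path are not put explicitly by the MMIO
 * accessors; fw_domain_arm_timer() arms each domain's hrtimer and
 * intel_uncore_fw_release_timer() drops the reference again shortly after
 * the access.
 */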
   1615static noinline void ___force_wake_auto(struct intel_uncore *uncore,
   1616					enum forcewake_domains fw_domains)
   1617{
   1618	struct intel_uncore_forcewake_domain *domain;
   1619	unsigned int tmp;
   1620
   1621	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
   1622
   1623	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
   1624		fw_domain_arm_timer(domain);
   1625
   1626	fw_domains_get(uncore, fw_domains);
   1627}
   1628
   1629static inline void __force_wake_auto(struct intel_uncore *uncore,
   1630				     enum forcewake_domains fw_domains)
   1631{
   1632	GEM_BUG_ON(!fw_domains);
   1633
   1634	/* Turn on all requested but inactive supported forcewake domains. */
   1635	fw_domains &= uncore->fw_domains;
   1636	fw_domains &= ~uncore->fw_domains_active;
   1637
   1638	if (fw_domains)
   1639		___force_wake_auto(uncore, fw_domains);
   1640}
   1641
   1642#define __gen_fwtable_read(x) \
   1643static u##x \
   1644fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
   1645{ \
   1646	enum forcewake_domains fw_engine; \
   1647	GEN6_READ_HEADER(x); \
   1648	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
   1649	if (fw_engine) \
   1650		__force_wake_auto(uncore, fw_engine); \
   1651	val = __raw_uncore_read##x(uncore, reg); \
   1652	GEN6_READ_FOOTER; \
   1653}
   1654
   1655static enum forcewake_domains
   1656fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
   1657	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
   1658}
   1659
   1660__gen_fwtable_read(8)
   1661__gen_fwtable_read(16)
   1662__gen_fwtable_read(32)
   1663__gen_fwtable_read(64)
   1664
   1665#undef __gen_fwtable_read
   1666#undef GEN6_READ_FOOTER
   1667#undef GEN6_READ_HEADER
   1668
   1669#define GEN2_WRITE_HEADER \
   1670	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
   1671	assert_rpm_wakelock_held(uncore->rpm); \
   1672
   1673#define GEN2_WRITE_FOOTER
   1674
   1675#define __gen2_write(x) \
   1676static void \
   1677gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
   1678	GEN2_WRITE_HEADER; \
   1679	__raw_uncore_write##x(uncore, reg, val); \
   1680	GEN2_WRITE_FOOTER; \
   1681}
   1682
   1683#define __gen5_write(x) \
   1684static void \
   1685gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
   1686	GEN2_WRITE_HEADER; \
   1687	ilk_dummy_write(uncore); \
   1688	__raw_uncore_write##x(uncore, reg, val); \
   1689	GEN2_WRITE_FOOTER; \
   1690}
   1691
   1692__gen5_write(8)
   1693__gen5_write(16)
   1694__gen5_write(32)
   1695__gen2_write(8)
   1696__gen2_write(16)
   1697__gen2_write(32)
   1698
   1699#undef __gen5_write
   1700#undef __gen2_write
   1701
   1702#undef GEN2_WRITE_FOOTER
   1703#undef GEN2_WRITE_HEADER
   1704
   1705#define GEN6_WRITE_HEADER \
   1706	u32 offset = i915_mmio_reg_offset(reg); \
   1707	unsigned long irqflags; \
   1708	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
   1709	assert_rpm_wakelock_held(uncore->rpm); \
   1710	spin_lock_irqsave(&uncore->lock, irqflags); \
   1711	unclaimed_reg_debug(uncore, reg, false, true)
   1712
   1713#define GEN6_WRITE_FOOTER \
   1714	unclaimed_reg_debug(uncore, reg, false, false); \
   1715	spin_unlock_irqrestore(&uncore->lock, irqflags)
   1716
   1717#define __gen6_write(x) \
   1718static void \
   1719gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
   1720	GEN6_WRITE_HEADER; \
   1721	if (NEEDS_FORCE_WAKE(offset)) \
   1722		__gen6_gt_wait_for_fifo(uncore); \
   1723	__raw_uncore_write##x(uncore, reg, val); \
   1724	GEN6_WRITE_FOOTER; \
   1725}
   1726__gen6_write(8)
   1727__gen6_write(16)
   1728__gen6_write(32)
   1729
   1730#define __gen_fwtable_write(x) \
   1731static void \
   1732fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
   1733	enum forcewake_domains fw_engine; \
   1734	GEN6_WRITE_HEADER; \
   1735	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
   1736	if (fw_engine) \
   1737		__force_wake_auto(uncore, fw_engine); \
   1738	__raw_uncore_write##x(uncore, reg, val); \
   1739	GEN6_WRITE_FOOTER; \
   1740}
   1741
   1742static enum forcewake_domains
   1743fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
   1744{
   1745	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
   1746}
   1747
   1748__gen_fwtable_write(8)
   1749__gen_fwtable_write(16)
   1750__gen_fwtable_write(32)
   1751
   1752#undef __gen_fwtable_write
   1753#undef GEN6_WRITE_FOOTER
   1754#undef GEN6_WRITE_HEADER
   1755
   1756#define __vgpu_write(x) \
   1757static void \
   1758vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
   1759	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
   1760	__raw_uncore_write##x(uncore, reg, val); \
   1761}
   1762__vgpu_write(8)
   1763__vgpu_write(16)
   1764__vgpu_write(32)
   1765
   1766#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
   1767do { \
   1768	(uncore)->funcs.mmio_writeb = x##_write8; \
   1769	(uncore)->funcs.mmio_writew = x##_write16; \
   1770	(uncore)->funcs.mmio_writel = x##_write32; \
   1771} while (0)
   1772
   1773#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
   1774do { \
   1775	(uncore)->funcs.mmio_readb = x##_read8; \
   1776	(uncore)->funcs.mmio_readw = x##_read16; \
   1777	(uncore)->funcs.mmio_readl = x##_read32; \
   1778	(uncore)->funcs.mmio_readq = x##_read64; \
   1779} while (0)
   1780
   1781#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
   1782do { \
   1783	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
   1784	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
   1785} while (0)
   1786
   1787#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
   1788do { \
   1789	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
   1790	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
   1791} while (0)
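
/*
 * Illustrative expansion (not part of the original source): for instance,
 * ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable) wires up
 *
 *	uncore->funcs.mmio_readb      = fwtable_read8;
 *	uncore->funcs.mmio_readw      = fwtable_read16;
 *	uncore->funcs.mmio_readl      = fwtable_read32;
 *	uncore->funcs.mmio_readq      = fwtable_read64;
 *	uncore->funcs.read_fw_domains = fwtable_reg_read_fw_domains;
 */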
   1792
   1793static int __fw_domain_init(struct intel_uncore *uncore,
   1794			    enum forcewake_domain_id domain_id,
   1795			    i915_reg_t reg_set,
   1796			    i915_reg_t reg_ack)
   1797{
   1798	struct intel_uncore_forcewake_domain *d;
   1799
   1800	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
   1801	GEM_BUG_ON(uncore->fw_domain[domain_id]);
   1802
   1803	if (i915_inject_probe_failure(uncore->i915))
   1804		return -ENOMEM;
   1805
   1806	d = kzalloc(sizeof(*d), GFP_KERNEL);
   1807	if (!d)
   1808		return -ENOMEM;
   1809
   1810	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
   1811	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
   1812
   1813	d->uncore = uncore;
   1814	d->wake_count = 0;
   1815	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
   1816	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
   1817
   1818	d->id = domain_id;
   1819
   1820	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
   1821	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
   1822	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
   1823	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
   1824	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
   1825	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
   1826	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
   1827	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
   1828	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
   1829	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
   1830	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
   1831	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
   1832	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
   1833	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
   1834	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
   1835
   1836	d->mask = BIT(domain_id);
   1837
   1838	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
   1839	d->timer.function = intel_uncore_fw_release_timer;
   1840
   1841	uncore->fw_domains |= BIT(domain_id);
   1842
   1843	fw_domain_reset(d);
   1844
   1845	uncore->fw_domain[domain_id] = d;
   1846
   1847	return 0;
   1848}
   1849
   1850static void fw_domain_fini(struct intel_uncore *uncore,
   1851			   enum forcewake_domain_id domain_id)
   1852{
   1853	struct intel_uncore_forcewake_domain *d;
   1854
   1855	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
   1856
   1857	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
   1858	if (!d)
   1859		return;
   1860
   1861	uncore->fw_domains &= ~BIT(domain_id);
   1862	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
   1863	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
   1864	kfree(d);
   1865}
   1866
   1867static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
   1868{
   1869	struct intel_uncore_forcewake_domain *d;
   1870	int tmp;
   1871
   1872	for_each_fw_domain(d, uncore, tmp)
   1873		fw_domain_fini(uncore, d->id);
   1874}
   1875
   1876static const struct intel_uncore_fw_get uncore_get_fallback = {
   1877	.force_wake_get = fw_domains_get_with_fallback
   1878};
   1879
   1880static const struct intel_uncore_fw_get uncore_get_normal = {
   1881	.force_wake_get = fw_domains_get_normal,
   1882};
   1883
   1884static const struct intel_uncore_fw_get uncore_get_thread_status = {
   1885	.force_wake_get = fw_domains_get_with_thread_status
   1886};
   1887
   1888static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
   1889{
   1890	struct drm_i915_private *i915 = uncore->i915;
   1891	int ret = 0;
   1892
   1893	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
   1894
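/*
 * ret latches the first error: once __fw_domain_init() fails, the ?: below
 * short-circuits and the remaining fw_domain_init() calls become no-ops.
 */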
   1895#define fw_domain_init(uncore__, id__, set__, ack__) \
   1896	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
   1897
   1898	if (GRAPHICS_VER(i915) >= 11) {
   1899		/* we'll prune the domains of missing engines later */
   1900		intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
   1901		int i;
   1902
   1903		uncore->fw_get_funcs = &uncore_get_fallback;
   1904		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
   1905			       FORCEWAKE_RENDER_GEN9,
   1906			       FORCEWAKE_ACK_RENDER_GEN9);
   1907		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
   1908			       FORCEWAKE_GT_GEN9,
   1909			       FORCEWAKE_ACK_GT_GEN9);
   1910
   1911		for (i = 0; i < I915_MAX_VCS; i++) {
   1912			if (!__HAS_ENGINE(emask, _VCS(i)))
   1913				continue;
   1914
   1915			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
   1916				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
   1917				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
   1918		}
   1919		for (i = 0; i < I915_MAX_VECS; i++) {
   1920			if (!__HAS_ENGINE(emask, _VECS(i)))
   1921				continue;
   1922
   1923			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
   1924				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
   1925				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
   1926		}
   1927	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
   1928		uncore->fw_get_funcs = &uncore_get_fallback;
   1929		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
   1930			       FORCEWAKE_RENDER_GEN9,
   1931			       FORCEWAKE_ACK_RENDER_GEN9);
   1932		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
   1933			       FORCEWAKE_GT_GEN9,
   1934			       FORCEWAKE_ACK_GT_GEN9);
   1935		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
   1936			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
   1937	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
   1938		uncore->fw_get_funcs = &uncore_get_normal;
   1939		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
   1940			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
   1941		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
   1942			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
   1943	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
   1944		uncore->fw_get_funcs = &uncore_get_thread_status;
   1945		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
   1946			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
   1947	} else if (IS_IVYBRIDGE(i915)) {
   1948		u32 ecobus;
   1949
   1950		/* IVB configs may use multi-threaded forcewake */
   1951
    1952		/* A small trick here - if the BIOS hasn't configured
    1953		 * MT forcewake, and if the device is in RC6, then
    1954		 * force_wake_mt_get will not wake the device and the
    1955		 * ECOBUS read will return zero, which will be
    1956		 * (correctly) interpreted by the test below as MT
    1957		 * forcewake being disabled.
   1958		 */
   1959		uncore->fw_get_funcs = &uncore_get_thread_status;
   1960
   1961		/* We need to init first for ECOBUS access and then
    1962		 * determine later if we want to reinit, in case MT access is
    1963		 * not working. At this stage we don't know which flavour this
    1964		 * ivb is, so it is better to also reset the gen6 fw registers
   1965		 * before the ecobus check.
   1966		 */
   1967
   1968		__raw_uncore_write32(uncore, FORCEWAKE, 0);
   1969		__raw_posting_read(uncore, ECOBUS);
   1970
   1971		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
   1972				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
   1973		if (ret)
   1974			goto out;
   1975
   1976		spin_lock_irq(&uncore->lock);
   1977		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
   1978		ecobus = __raw_uncore_read32(uncore, ECOBUS);
   1979		fw_domains_put(uncore, FORCEWAKE_RENDER);
   1980		spin_unlock_irq(&uncore->lock);
   1981
   1982		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
   1983			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
   1984			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
   1985			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
   1986			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
   1987				       FORCEWAKE, FORCEWAKE_ACK);
   1988		}
   1989	} else if (GRAPHICS_VER(i915) == 6) {
   1990		uncore->fw_get_funcs = &uncore_get_thread_status;
   1991		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
   1992			       FORCEWAKE, FORCEWAKE_ACK);
   1993	}
   1994
   1995#undef fw_domain_init
   1996
   1997	/* All future platforms are expected to require complex power gating */
   1998	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
   1999
   2000out:
   2001	if (ret)
   2002		intel_uncore_fw_domains_fini(uncore);
   2003
   2004	return ret;
   2005}
   2006
   2007#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
   2008{ \
   2009	(uncore)->fw_domains_table = \
   2010			(struct intel_forcewake_range *)(d); \
   2011	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
   2012}
   2013
   2014#define ASSIGN_SHADOW_TABLE(uncore, d) \
   2015{ \
   2016	(uncore)->shadowed_reg_table = d; \
   2017	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
   2018}
   2019
   2020static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
   2021					 unsigned long action, void *data)
   2022{
   2023	struct intel_uncore *uncore = container_of(nb,
   2024			struct intel_uncore, pmic_bus_access_nb);
   2025
   2026	switch (action) {
   2027	case MBI_PMIC_BUS_ACCESS_BEGIN:
   2028		/*
    2029		 * Take forcewake on all domains now so that we don't need to do
    2030		 * it later; on systems where this notifier gets called, a later
    2031		 * forcewake would require the punit to access the shared pmic i2c
    2032		 * bus, which will be busy after this notification, leading to:
   2033		 * "render: timed out waiting for forcewake ack request."
   2034		 * errors.
   2035		 *
   2036		 * The notifier is unregistered during intel_runtime_suspend(),
   2037		 * so it's ok to access the HW here without holding a RPM
   2038		 * wake reference -> disable wakeref asserts for the time of
   2039		 * the access.
   2040		 */
   2041		disable_rpm_wakeref_asserts(uncore->rpm);
   2042		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
   2043		enable_rpm_wakeref_asserts(uncore->rpm);
   2044		break;
   2045	case MBI_PMIC_BUS_ACCESS_END:
   2046		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
   2047		break;
   2048	}
   2049
   2050	return NOTIFY_OK;
   2051}
   2052
   2053int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
   2054{
   2055	struct drm_i915_private *i915 = uncore->i915;
   2056	int mmio_size;
   2057
   2058	/*
   2059	 * Before gen4, the registers and the GTT are behind different BARs.
   2060	 * However, from gen4 onwards, the registers and the GTT are shared
    2061	 * in the same BAR, so we want to restrict this ioremap from clobbering
    2062	 * the GTT, which we want to map with ioremap_wc instead. Fortunately,
   2063	 * the register BAR remains the same size for all the earlier
   2064	 * generations up to Ironlake.
    2065	 * For dgfx chips the register range is expanded to 4MB.
   2066	 */
   2067	if (GRAPHICS_VER(i915) < 5)
   2068		mmio_size = 512 * 1024;
   2069	else if (IS_DGFX(i915))
   2070		mmio_size = 4 * 1024 * 1024;
   2071	else
   2072		mmio_size = 2 * 1024 * 1024;
   2073
   2074	uncore->regs = ioremap(phys_addr, mmio_size);
   2075	if (uncore->regs == NULL) {
   2076		drm_err(&i915->drm, "failed to map registers\n");
   2077		return -EIO;
   2078	}
   2079
   2080	return 0;
   2081}
   2082
   2083void intel_uncore_cleanup_mmio(struct intel_uncore *uncore)
   2084{
   2085	iounmap(uncore->regs);
   2086}
   2087
   2088void intel_uncore_init_early(struct intel_uncore *uncore,
   2089			     struct intel_gt *gt)
   2090{
   2091	spin_lock_init(&uncore->lock);
   2092	uncore->i915 = gt->i915;
   2093	uncore->gt = gt;
   2094	uncore->rpm = &gt->i915->runtime_pm;
   2095	uncore->debug = &gt->i915->mmio_debug;
   2096}
   2097
   2098static void uncore_raw_init(struct intel_uncore *uncore)
   2099{
   2100	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
   2101
   2102	if (intel_vgpu_active(uncore->i915)) {
   2103		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
   2104		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
   2105	} else if (GRAPHICS_VER(uncore->i915) == 5) {
   2106		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
   2107		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
   2108	} else {
   2109		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
   2110		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
   2111	}
   2112}
   2113
   2114static int uncore_forcewake_init(struct intel_uncore *uncore)
   2115{
   2116	struct drm_i915_private *i915 = uncore->i915;
   2117	int ret;
   2118
   2119	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
   2120
   2121	ret = intel_uncore_fw_domains_init(uncore);
   2122	if (ret)
   2123		return ret;
   2124	forcewake_early_sanitize(uncore, 0);
   2125
   2126	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
   2127
   2128	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
   2129		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
   2130		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
   2131		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
   2132	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
   2133		ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
   2134		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
   2135		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
   2136	} else if (GRAPHICS_VER(i915) >= 12) {
   2137		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
   2138		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
   2139		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
   2140	} else if (GRAPHICS_VER(i915) == 11) {
   2141		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
   2142		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
   2143		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
   2144	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
   2145		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
   2146		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
   2147		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
   2148	} else if (IS_CHERRYVIEW(i915)) {
   2149		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
   2150		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
   2151		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
   2152	} else if (GRAPHICS_VER(i915) == 8) {
   2153		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
   2154		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
   2155		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
   2156	} else if (IS_VALLEYVIEW(i915)) {
   2157		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
   2158		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
   2159	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
   2160		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
   2161		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
   2162	}
   2163
   2164	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
   2165	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
   2166
   2167	return 0;
   2168}
   2169
   2170int intel_uncore_init_mmio(struct intel_uncore *uncore)
   2171{
   2172	struct drm_i915_private *i915 = uncore->i915;
   2173	int ret;
   2174
   2175	/*
   2176	 * The boot firmware initializes local memory and assesses its health.
   2177	 * If memory training fails, the punit will have been instructed to
   2178	 * keep the GT powered down; we won't be able to communicate with it
   2179	 * and we should not continue with driver initialization.
   2180	 */
   2181	if (IS_DGFX(i915) &&
   2182	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
   2183		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
   2184		return -ENODEV;
   2185	}
   2186
   2187	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
   2188		uncore->flags |= UNCORE_HAS_FORCEWAKE;
   2189
   2190	if (!intel_uncore_has_forcewake(uncore)) {
   2191		uncore_raw_init(uncore);
   2192	} else {
   2193		ret = uncore_forcewake_init(uncore);
   2194		if (ret)
   2195			return ret;
   2196	}
   2197
    2198	/* make sure fw funcs are set if and only if we have fw */
   2199	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
   2200	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
   2201	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
   2202
   2203	if (HAS_FPGA_DBG_UNCLAIMED(i915))
   2204		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
   2205
   2206	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
   2207		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
   2208
   2209	if (IS_GRAPHICS_VER(i915, 6, 7))
   2210		uncore->flags |= UNCORE_HAS_FIFO;
   2211
   2212	/* clear out unclaimed reg detection bit */
   2213	if (intel_uncore_unclaimed_mmio(uncore))
   2214		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
   2215
   2216	return 0;
   2217}
   2218
   2219/*
   2220 * We might have detected that some engines are fused off after we initialized
   2221 * the forcewake domains. Prune them, to make sure they only reference existing
   2222 * engines.
   2223 */
   2224void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
   2225					  struct intel_gt *gt)
   2226{
   2227	enum forcewake_domains fw_domains = uncore->fw_domains;
   2228	enum forcewake_domain_id domain_id;
   2229	int i;
   2230
   2231	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
   2232		return;
   2233
   2234	for (i = 0; i < I915_MAX_VCS; i++) {
   2235		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
   2236
   2237		if (HAS_ENGINE(gt, _VCS(i)))
   2238			continue;
   2239
   2240		/*
   2241		 * Starting with XeHP, the power well for an even-numbered
   2242		 * VDBOX is also used for shared units within the
   2243		 * media slice such as SFC.  So even if the engine
   2244		 * itself is fused off, we still need to initialize
   2245		 * the forcewake domain if any of the other engines
   2246		 * in the same media slice are present.
   2247		 */
   2248		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
   2249			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
   2250				continue;
   2251
   2252			if (HAS_ENGINE(gt, _VECS(i / 2)))
   2253				continue;
   2254		}
   2255
   2256		if (fw_domains & BIT(domain_id))
   2257			fw_domain_fini(uncore, domain_id);
   2258	}
   2259
   2260	for (i = 0; i < I915_MAX_VECS; i++) {
   2261		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
   2262
   2263		if (HAS_ENGINE(gt, _VECS(i)))
   2264			continue;
   2265
   2266		if (fw_domains & BIT(domain_id))
   2267			fw_domain_fini(uncore, domain_id);
   2268	}
   2269}
   2270
   2271void intel_uncore_fini_mmio(struct intel_uncore *uncore)
   2272{
   2273	if (intel_uncore_has_forcewake(uncore)) {
   2274		iosf_mbi_punit_acquire();
   2275		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
   2276			&uncore->pmic_bus_access_nb);
   2277		intel_uncore_forcewake_reset(uncore);
   2278		intel_uncore_fw_domains_fini(uncore);
   2279		iosf_mbi_punit_release();
   2280	}
   2281}
   2282
   2283/**
   2284 * __intel_wait_for_register_fw - wait until register matches expected state
   2285 * @uncore: the struct intel_uncore
   2286 * @reg: the register to read
   2287 * @mask: mask to apply to register value
   2288 * @value: expected value
    2289 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
    2290 * @slow_timeout_ms: slow timeout in milliseconds
    2291 * @out_value: optional placeholder to hold the register value
   2292 *
   2293 * This routine waits until the target register @reg contains the expected
   2294 * @value after applying the @mask, i.e. it waits until ::
   2295 *
   2296 *     (intel_uncore_read_fw(uncore, reg) & mask) == value
   2297 *
    2298 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
    2299 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
    2300 * must not be larger than 20,000 microseconds.
    2301 *
    2302 * Note that this routine assumes the caller holds forcewake asserted; it is
   2303 * not suitable for very long waits. See intel_wait_for_register() if you
   2304 * wish to wait without holding forcewake for the duration (i.e. you expect
   2305 * the wait to be slow).
   2306 *
   2307 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
   2308 */
   2309int __intel_wait_for_register_fw(struct intel_uncore *uncore,
   2310				 i915_reg_t reg,
   2311				 u32 mask,
   2312				 u32 value,
   2313				 unsigned int fast_timeout_us,
   2314				 unsigned int slow_timeout_ms,
   2315				 u32 *out_value)
   2316{
   2317	u32 reg_value = 0;
   2318#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
   2319	int ret;
   2320
   2321	/* Catch any overuse of this function */
   2322	might_sleep_if(slow_timeout_ms);
   2323	GEM_BUG_ON(fast_timeout_us > 20000);
   2324	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
   2325
   2326	ret = -ETIMEDOUT;
   2327	if (fast_timeout_us && fast_timeout_us <= 20000)
   2328		ret = _wait_for_atomic(done, fast_timeout_us, 0);
   2329	if (ret && slow_timeout_ms)
   2330		ret = wait_for(done, slow_timeout_ms);
   2331
   2332	if (out_value)
   2333		*out_value = reg_value;
   2334
   2335	return ret;
   2336#undef done
   2337}
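
/*
 * A minimal usage sketch (illustrative only; SOME_REG, SOME_MASK and
 * SOME_VALUE are placeholders). The caller holds forcewake across the
 * wait, as required above, and uses only the fast (atomic) timeout:
 *
 *	int err;
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	err = __intel_wait_for_register_fw(uncore, SOME_REG,
 *					   SOME_MASK, SOME_VALUE,
 *					   500, 0, NULL);
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 */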
   2338
   2339/**
   2340 * __intel_wait_for_register - wait until register matches expected state
   2341 * @uncore: the struct intel_uncore
   2342 * @reg: the register to read
   2343 * @mask: mask to apply to register value
   2344 * @value: expected value
    2345 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
    2346 * @slow_timeout_ms: slow timeout in milliseconds
    2347 * @out_value: optional placeholder to hold the register value
   2348 *
   2349 * This routine waits until the target register @reg contains the expected
   2350 * @value after applying the @mask, i.e. it waits until ::
   2351 *
   2352 *     (intel_uncore_read(uncore, reg) & mask) == value
   2353 *
    2354 * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
   2355 *
   2356 * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
   2357 */
   2358int __intel_wait_for_register(struct intel_uncore *uncore,
   2359			      i915_reg_t reg,
   2360			      u32 mask,
   2361			      u32 value,
   2362			      unsigned int fast_timeout_us,
   2363			      unsigned int slow_timeout_ms,
   2364			      u32 *out_value)
   2365{
   2366	unsigned fw =
   2367		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
   2368	u32 reg_value;
   2369	int ret;
   2370
   2371	might_sleep_if(slow_timeout_ms);
   2372
   2373	spin_lock_irq(&uncore->lock);
   2374	intel_uncore_forcewake_get__locked(uncore, fw);
   2375
   2376	ret = __intel_wait_for_register_fw(uncore,
   2377					   reg, mask, value,
   2378					   fast_timeout_us, 0, &reg_value);
   2379
   2380	intel_uncore_forcewake_put__locked(uncore, fw);
   2381	spin_unlock_irq(&uncore->lock);
   2382
   2383	if (ret && slow_timeout_ms)
   2384		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
   2385								       reg),
   2386				 (reg_value & mask) == value,
   2387				 slow_timeout_ms * 1000, 10, 1000);
   2388
   2389	/* just trace the final value */
   2390	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
   2391
   2392	if (out_value)
   2393		*out_value = reg_value;
   2394
   2395	return ret;
   2396}
   2397
   2398bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
   2399{
   2400	bool ret;
   2401
   2402	spin_lock_irq(&uncore->debug->lock);
   2403	ret = check_for_unclaimed_mmio(uncore);
   2404	spin_unlock_irq(&uncore->debug->lock);
   2405
   2406	return ret;
   2407}
   2408
   2409bool
   2410intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
   2411{
   2412	bool ret = false;
   2413
   2414	spin_lock_irq(&uncore->debug->lock);
   2415
   2416	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
   2417		goto out;
   2418
   2419	if (unlikely(check_for_unclaimed_mmio(uncore))) {
   2420		if (!uncore->i915->params.mmio_debug) {
   2421			drm_dbg(&uncore->i915->drm,
   2422				"Unclaimed register detected, "
   2423				"enabling oneshot unclaimed register reporting. "
   2424				"Please use i915.mmio_debug=N for more information.\n");
   2425			uncore->i915->params.mmio_debug++;
   2426		}
   2427		uncore->debug->unclaimed_mmio_check--;
   2428		ret = true;
   2429	}
   2430
   2431out:
   2432	spin_unlock_irq(&uncore->debug->lock);
   2433
   2434	return ret;
   2435}
   2436
   2437/**
   2438 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
   2439 * 				    a register
   2440 * @uncore: pointer to struct intel_uncore
   2441 * @reg: register in question
   2442 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
   2443 *
    2444 * Returns the set of forcewake domains that must be taken (for example with
    2445 * intel_uncore_forcewake_get()) for the specified register to be accessible in
    2446 * the specified mode (read, write or read/write) with raw mmio accessors.
   2447 *
   2448 * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
   2449 * callers to do FIFO management on their own or risk losing writes.
   2450 */
   2451enum forcewake_domains
   2452intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
   2453			       i915_reg_t reg, unsigned int op)
   2454{
   2455	enum forcewake_domains fw_domains = 0;
   2456
   2457	drm_WARN_ON(&uncore->i915->drm, !op);
   2458
   2459	if (!intel_uncore_has_forcewake(uncore))
   2460		return 0;
   2461
   2462	if (op & FW_REG_READ)
   2463		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
   2464
   2465	if (op & FW_REG_WRITE)
   2466		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
   2467
   2468	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
   2469
   2470	return fw_domains;
   2471}
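
/*
 * A minimal usage sketch (illustrative only; SOME_REG is a placeholder):
 * query the required domains once, hold them across a burst of raw
 * accesses, then release them.
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(uncore, SOME_REG,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	intel_uncore_forcewake_get(uncore, fw);
 *	val = intel_uncore_read_fw(uncore, SOME_REG);
 *	intel_uncore_write_fw(uncore, SOME_REG, val | BIT(0));
 *	intel_uncore_forcewake_put(uncore, fw);
 */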
   2472
   2473/**
   2474 * uncore_rw_with_mcr_steering_fw - Access a register after programming
   2475 *				    the MCR selector register.
   2476 * @uncore: pointer to struct intel_uncore
   2477 * @reg: register being accessed
   2478 * @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
   2479 * @slice: slice number (ignored for multi-cast write)
   2480 * @subslice: sub-slice number (ignored for multi-cast write)
   2481 * @value: register value to be written (ignored for read)
   2482 *
    2483 * Return: 0 for write access, the register value for read access.
   2484 *
   2485 * Caller needs to make sure the relevant forcewake wells are up.
   2486 */
   2487static u32 uncore_rw_with_mcr_steering_fw(struct intel_uncore *uncore,
   2488					  i915_reg_t reg, u8 rw_flag,
   2489					  int slice, int subslice, u32 value)
   2490{
   2491	u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
   2492
   2493	lockdep_assert_held(&uncore->lock);
   2494
   2495	if (GRAPHICS_VER(uncore->i915) >= 11) {
   2496		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
   2497		mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
   2498
   2499		/*
   2500		 * Wa_22013088509
   2501		 *
   2502		 * The setting of the multicast/unicast bit usually wouldn't
   2503		 * matter for read operations (which always return the value
   2504		 * from a single register instance regardless of how that bit
   2505		 * is set), but some platforms have a workaround requiring us
   2506		 * to remain in multicast mode for reads.  There's no real
   2507		 * downside to this, so we'll just go ahead and do so on all
   2508		 * platforms; we'll only clear the multicast bit from the mask
    2509		 * when explicitly doing a write operation.
   2510		 */
   2511		if (rw_flag == FW_REG_WRITE)
   2512			mcr_mask |= GEN11_MCR_MULTICAST;
   2513	} else {
   2514		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
   2515		mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
   2516	}
   2517
   2518	old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
   2519
   2520	mcr &= ~mcr_mask;
   2521	mcr |= mcr_ss;
   2522	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
   2523
   2524	if (rw_flag == FW_REG_READ)
   2525		val = intel_uncore_read_fw(uncore, reg);
   2526	else
   2527		intel_uncore_write_fw(uncore, reg, value);
   2528
   2529	mcr &= ~mcr_mask;
   2530	mcr |= old_mcr & mcr_mask;
   2531
   2532	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
   2533
   2534	return val;
   2535}
   2536
   2537static u32 uncore_rw_with_mcr_steering(struct intel_uncore *uncore,
   2538				       i915_reg_t reg, u8 rw_flag,
   2539				       int slice, int subslice,
   2540				       u32 value)
   2541{
   2542	enum forcewake_domains fw_domains;
   2543	u32 val;
   2544
   2545	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
   2546						    rw_flag);
   2547	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
   2548						     GEN8_MCR_SELECTOR,
   2549						     FW_REG_READ | FW_REG_WRITE);
   2550
   2551	spin_lock_irq(&uncore->lock);
   2552	intel_uncore_forcewake_get__locked(uncore, fw_domains);
   2553
   2554	val = uncore_rw_with_mcr_steering_fw(uncore, reg, rw_flag,
   2555					     slice, subslice, value);
   2556
   2557	intel_uncore_forcewake_put__locked(uncore, fw_domains);
   2558	spin_unlock_irq(&uncore->lock);
   2559
   2560	return val;
   2561}
   2562
   2563u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
   2564					   i915_reg_t reg, int slice, int subslice)
   2565{
   2566	return uncore_rw_with_mcr_steering_fw(uncore, reg, FW_REG_READ,
   2567					      slice, subslice, 0);
   2568}
   2569
   2570u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
   2571					i915_reg_t reg, int slice, int subslice)
   2572{
   2573	return uncore_rw_with_mcr_steering(uncore, reg, FW_REG_READ,
   2574					   slice, subslice, 0);
   2575}
   2576
   2577void intel_uncore_write_with_mcr_steering(struct intel_uncore *uncore,
   2578					  i915_reg_t reg, u32 value,
   2579					  int slice, int subslice)
   2580{
   2581	uncore_rw_with_mcr_steering(uncore, reg, FW_REG_WRITE,
   2582				    slice, subslice, value);
   2583}
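
/*
 * A minimal usage sketch (illustrative only; SOME_MCR_REG and the steering
 * values are placeholders). Forcewake and the uncore lock are handled
 * internally by these wrappers:
 *
 *	u32 val = intel_uncore_read_with_mcr_steering(uncore, SOME_MCR_REG,
 *						      slice, subslice);
 */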
   2584
   2585#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
   2586#include "selftests/mock_uncore.c"
   2587#include "selftests/intel_uncore.c"
   2588#endif