cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_uncore.h


/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>

#include "i915_reg_defs.h"

struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
struct intel_gt;

struct intel_uncore_mmio_debug {
	spinlock_t lock; /** lock is also taken in irq contexts. */
	int unclaimed_mmio_check;
	int saved_mmio_check;
	u32 suspend_count;
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_GT,        /* also includes blitter engine */
	FW_DOMAIN_ID_MEDIA,
	FW_DOMAIN_ID_MEDIA_VDBOX0,
	FW_DOMAIN_ID_MEDIA_VDBOX1,
	FW_DOMAIN_ID_MEDIA_VDBOX2,
	FW_DOMAIN_ID_MEDIA_VDBOX3,
	FW_DOMAIN_ID_MEDIA_VDBOX4,
	FW_DOMAIN_ID_MEDIA_VDBOX5,
	FW_DOMAIN_ID_MEDIA_VDBOX6,
	FW_DOMAIN_ID_MEDIA_VDBOX7,
	FW_DOMAIN_ID_MEDIA_VEBOX0,
	FW_DOMAIN_ID_MEDIA_VEBOX1,
	FW_DOMAIN_ID_MEDIA_VEBOX2,
	FW_DOMAIN_ID_MEDIA_VEBOX3,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER	= BIT(FW_DOMAIN_ID_RENDER),
	FORCEWAKE_GT		= BIT(FW_DOMAIN_ID_GT),
	FORCEWAKE_MEDIA		= BIT(FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_MEDIA_VDBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
	FORCEWAKE_MEDIA_VDBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
	FORCEWAKE_MEDIA_VDBOX2	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
	FORCEWAKE_MEDIA_VDBOX3	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
	FORCEWAKE_MEDIA_VDBOX4	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX4),
	FORCEWAKE_MEDIA_VDBOX5	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX5),
	FORCEWAKE_MEDIA_VDBOX6	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX6),
	FORCEWAKE_MEDIA_VDBOX7	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX7),
	FORCEWAKE_MEDIA_VEBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
	FORCEWAKE_MEDIA_VEBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),
	FORCEWAKE_MEDIA_VEBOX2	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX2),
	FORCEWAKE_MEDIA_VEBOX3	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX3),

	FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1,
};
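
/*
 * Each domain is a distinct bit, so callers combine domains with bitwise
 * OR, and FORCEWAKE_ALL covers every defined domain. A hedged sketch (the
 * particular pair of domains here is arbitrary):
 *
 *	enum forcewake_domains fw = FORCEWAKE_RENDER | FORCEWAKE_MEDIA;
 */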

struct intel_uncore_fw_get {
	void (*force_wake_get)(struct intel_uncore *uncore,
			       enum forcewake_domains domains);
};

struct intel_uncore_funcs {
	enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
						  i915_reg_t r);
	enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
						   i915_reg_t r);

	u8 (*mmio_readb)(struct intel_uncore *uncore,
			 i915_reg_t r, bool trace);
	u16 (*mmio_readw)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);
	u32 (*mmio_readl)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);
	u64 (*mmio_readq)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);

	void (*mmio_writeb)(struct intel_uncore *uncore,
			    i915_reg_t r, u8 val, bool trace);
	void (*mmio_writew)(struct intel_uncore *uncore,
			    i915_reg_t r, u16 val, bool trace);
	void (*mmio_writel)(struct intel_uncore *uncore,
			    i915_reg_t r, u32 val, bool trace);
};

struct intel_forcewake_range {
	u32 start;
	u32 end;

	enum forcewake_domains domains;
};

/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */
struct i915_range {
	u32 start;
	u32 end;
};

struct intel_uncore {
	void __iomem *regs;

	struct drm_i915_private *i915;
	struct intel_gt *gt;
	struct intel_runtime_pm *rpm;

	spinlock_t lock; /** lock is also taken in irq contexts. */

	unsigned int flags;
#define UNCORE_HAS_FORCEWAKE		BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED	BIT(1)
#define UNCORE_HAS_DBG_UNCLAIMED	BIT(2)
#define UNCORE_HAS_FIFO			BIT(3)

	const struct intel_forcewake_range *fw_domains_table;
	unsigned int fw_domains_table_entries;

	/*
	 * Shadowed registers are special cases where we can safely write
	 * to the register *without* grabbing forcewake.
	 */
	const struct i915_range *shadowed_reg_table;
	unsigned int shadowed_reg_table_entries;

	struct notifier_block pmic_bus_access_nb;
	const struct intel_uncore_fw_get *fw_get_funcs;
	struct intel_uncore_funcs funcs;

	unsigned int fifo_count;

	enum forcewake_domains fw_domains;
	enum forcewake_domains fw_domains_active;
	enum forcewake_domains fw_domains_timer;
	enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */

	struct intel_uncore_forcewake_domain {
		struct intel_uncore *uncore;
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned int wake_count;
		bool active;
		struct hrtimer timer;
		u32 __iomem *reg_set;
		u32 __iomem *reg_ack;
	} *fw_domain[FW_DOMAIN_ID_COUNT];

	unsigned int user_forcewake_count;

	struct intel_uncore_mmio_debug *debug;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
	for (tmp__ = (mask__); tmp__ ;) \
		for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])

#define for_each_fw_domain(domain__, uncore__, tmp__) \
	for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
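
/*
 * Illustrative sketch of walking the initialised domains: domain__ visits
 * each struct intel_uncore_forcewake_domain whose bit is set in the mask,
 * and tmp__ is scratch state consumed bit by bit via __mask_next_bit():
 *
 *	struct intel_uncore_forcewake_domain *domain;
 *	unsigned int tmp;
 *
 *	for_each_fw_domain(domain, uncore, tmp)
 *		pr_info("fw domain %d active=%d\n", domain->id, domain->active);
 */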

static inline bool
intel_uncore_has_forcewake(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FORCEWAKE;
}

static inline bool
intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_fifo(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FIFO;
}

u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
					   i915_reg_t reg,
					   int slice, int subslice);
u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
					i915_reg_t reg, int slice, int subslice);
void intel_uncore_write_with_mcr_steering(struct intel_uncore *uncore,
					  i915_reg_t reg, u32 value,
					  int slice, int subslice);
void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct intel_gt *gt);
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_cleanup_mmio(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct intel_uncore *uncore);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);

void assert_forcewakes_inactive(struct intel_uncore *uncore);
void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op);
#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)
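
/*
 * A minimal sketch pairing intel_uncore_forcewake_for_reg() with the raw
 * *_fw() accessors defined further below; SOME_REG is a placeholder, not a
 * register defined by this header:
 *
 *	enum forcewake_domains fw;
 *	u32 val;
 *
 *	fw = intel_uncore_forcewake_for_reg(uncore, SOME_REG,
 *					    FW_REG_READ | FW_REG_WRITE);
 *	intel_uncore_forcewake_get(uncore, fw);
 *	val = intel_uncore_read_fw(uncore, SOME_REG);
 *	intel_uncore_write_fw(uncore, SOME_REG, val | 1);
 *	intel_uncore_forcewake_put(uncore, fw);
 */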

void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains domains);
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains);

/*
 * Like above but the caller must manage the uncore.lock itself.
 * Must be used with intel_uncore_read_fw() and friends.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);

void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);

int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value);
static inline int
intel_wait_for_register(struct intel_uncore *uncore,
			i915_reg_t reg,
			u32 mask,
			u32 value,
			unsigned int timeout_ms)
{
	return __intel_wait_for_register(uncore, reg, mask, value, 2,
					 timeout_ms, NULL);
}
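
/*
 * Illustrative polling sketch (STATUS_REG and READY_BIT are placeholders):
 * wait up to 10ms for the masked bits to reach the expected value, with
 * the 2us fast path hardwired by the inline above:
 *
 *	ret = intel_wait_for_register(uncore, STATUS_REG,
 *				      READY_BIT, READY_BIT, 10);
 *	if (ret)
 *		return ret;
 */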

int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value);
static inline int
intel_wait_for_register_fw(struct intel_uncore *uncore,
			   i915_reg_t reg,
			   u32 mask,
			   u32 value,
			   unsigned int timeout_ms)
{
	return __intel_wait_for_register_fw(uncore, reg, mask, value,
					    2, timeout_ms, NULL);
}

/* register access functions */
#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
					    i915_reg_t reg) \
{ \
	return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
}

#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
					   i915_reg_t reg, u##x__ val) \
{ \
	write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write
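
/*
 * For reference, __raw_read(32, l) above expands to the following inline
 * (a mechanical expansion, shown only for clarity):
 *
 *	static inline u32 __raw_uncore_read32(const struct intel_uncore *uncore,
 *					      i915_reg_t reg)
 *	{
 *		return readl(uncore->regs + i915_mmio_reg_offset(reg));
 *	}
 */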

#define __uncore_read(name__, x__, s__, trace__) \
static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
					   i915_reg_t reg) \
{ \
	return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
}

#define __uncore_write(name__, x__, s__, trace__) \
static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
					 i915_reg_t reg, u##x__ val) \
{ \
	uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
}

__uncore_read(read8, 8, b, true)
__uncore_read(read16, 16, w, true)
__uncore_read(read, 32, l, true)
__uncore_read(read16_notrace, 16, w, false)
__uncore_read(read_notrace, 32, l, false)

__uncore_write(write8, 8, b, true)
__uncore_write(write16, 16, w, true)
__uncore_write(write, 32, l, true)
__uncore_write(write_notrace, 32, l, false)
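
/*
 * The expansions above generate the traced accessors (intel_uncore_read8,
 * intel_uncore_read16, intel_uncore_read, intel_uncore_write8, ...) plus
 * *_notrace variants that skip tracing; each dispatches through
 * uncore->funcs. For clarity, __uncore_read(read, 32, l, true) becomes:
 *
 *	static inline u32 intel_uncore_read(struct intel_uncore *uncore,
 *					    i915_reg_t reg)
 *	{
 *		return uncore->funcs.mmio_readl(uncore, reg, (true));
 *	}
 */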

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support intel_uncore_write64,
 * or uncore->funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
__uncore_read(read64, 64, q, true)

static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
			 i915_reg_t lower_reg, i915_reg_t upper_reg)
{
	u32 upper, lower, old_upper, loop = 0;

	upper = intel_uncore_read(uncore, upper_reg);
	do {
		old_upper = upper;
		lower = intel_uncore_read(uncore, lower_reg);
		upper = intel_uncore_read(uncore, upper_reg);
	} while (upper != old_upper && loop++ < 2);

	return (u64)upper << 32 | lower;
}
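
/*
 * A hedged usage sketch for split 64-bit counters (TS_LOW and TS_HIGH are
 * placeholder registers): the retry loop above re-reads the upper half so
 * a carry between the two 32-bit reads cannot yield a torn result:
 *
 *	u64 ts = intel_uncore_read64_2x32(uncore, TS_LOW, TS_HIGH);
 */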

#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))

#undef __uncore_read
#undef __uncore_write

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&uncore->lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&uncore->lock);
 *
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
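
/*
 * Putting the pieces together, a minimal sketch of an untraced access
 * inside a critical section (SOME_REG, fw and val are placeholders; see
 * the comment above for the locking rules):
 *
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, fw);
 *	intel_uncore_write_fw(uncore, SOME_REG, val);
 *	intel_uncore_forcewake_put__locked(uncore, fw);
 *	spin_unlock_irq(&uncore->lock);
 */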

static inline void intel_uncore_rmw(struct intel_uncore *uncore,
				    i915_reg_t reg, u32 clear, u32 set)
{
	u32 old, val;

	old = intel_uncore_read(uncore, reg);
	val = (old & ~clear) | set;
	if (val != old)
		intel_uncore_write(uncore, reg, val);
}
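
/*
 * Example (placeholder register and bit names): a read-modify-write that
 * clears one mode bit and sets another, skipping the MMIO write when the
 * value is already correct. Note the read and write are not atomic as a
 * pair:
 *
 *	intel_uncore_rmw(uncore, CTRL_REG, OLD_MODE_BIT, NEW_MODE_BIT);
 */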

static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
				       i915_reg_t reg, u32 clear, u32 set)
{
	u32 old, val;

	old = intel_uncore_read_fw(uncore, reg);
	val = (old & ~clear) | set;
	if (val != old)
		intel_uncore_write_fw(uncore, reg, val);
}

static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
						i915_reg_t reg, u32 val,
						u32 mask, u32 expected_val)
{
	u32 reg_val;

	intel_uncore_write(uncore, reg, val);
	reg_val = intel_uncore_read(uncore, reg);

	return (reg_val & mask) != expected_val ? -EINVAL : 0;
}
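
/*
 * Illustrative sketch (CTRL_REG and ENABLE_BIT are placeholders): write a
 * value, read it back, and propagate the -EINVAL returned when the masked
 * readback does not match the expectation:
 *
 *	ret = intel_uncore_write_and_verify(uncore, CTRL_REG, ENABLE_BIT,
 *					    ENABLE_BIT, ENABLE_BIT);
 *	if (ret)
 *		return ret;
 */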

#define raw_reg_read(base, reg) \
	readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
	writel(value, base + i915_mmio_reg_offset(reg))

#endif /* !__INTEL_UNCORE_H__ */