cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

i915_utils.h (12766B)


/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_UTILS_H
#define __I915_UTILS_H

#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif

struct drm_i915_private;
struct timer_list;

#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"

#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
			     __stringify(x), (long)(x))

void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...);

#define i915_report_error(dev_priv, fmt, ...)				   \
	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)

int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
			      const char *func, int line);
#define i915_inject_probe_error(_i915, _err) \
	__i915_inject_probe_error((_i915), (_err), __func__, __LINE__)
bool i915_error_injected(void);

#else

#define i915_inject_probe_error(i915, e) ({ BUILD_BUG_ON_INVALID(i915); 0; })
#define i915_error_injected() false

#endif

#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV)

#define i915_probe_error(i915, fmt, ...)				   \
	__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)

#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows_t(T, A, B) \
	__builtin_add_overflow_p((A), (B), (T)0)
#else
#define add_overflows_t(T, A, B) ({ \
	typeof(A) a = (A); \
	typeof(B) b = (B); \
	(T)(a + b) < a; \
})
#endif

#define add_overflows(A, B) \
	add_overflows_t(typeof((A) + (B)), (A), (B))
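
/*
 * Editor's illustrative sketch (not part of the original header): checking
 * a u32 offset/length pair for wraparound before using it. The names
 * "offset" and "len" are hypothetical.
 *
 *	u32 offset, len;
 *	...
 *	if (add_overflows(offset, len))
 *		return -EINVAL;	// offset + len would wrap
 */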

#define range_overflows(start, size, max) ({ \
	typeof(start) start__ = (start); \
	typeof(size) size__ = (size); \
	typeof(max) max__ = (max); \
	(void)(&start__ == &size__); \
	(void)(&start__ == &max__); \
	start__ >= max__ || size__ > max__ - start__; \
})

#define range_overflows_t(type, start, size, max) \
	range_overflows((type)(start), (type)(size), (type)(max))

#define range_overflows_end(start, size, max) ({ \
	typeof(start) start__ = (start); \
	typeof(size) size__ = (size); \
	typeof(max) max__ = (max); \
	(void)(&start__ == &size__); \
	(void)(&start__ == &max__); \
	start__ > max__ || size__ > max__ - start__; \
})

#define range_overflows_end_t(type, start, size, max) \
	range_overflows_end((type)(start), (type)(size), (type)(max))
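
/*
 * Editor's illustrative sketch (not part of the original header): validating
 * that a hypothetical (offset, size) pair stays inside an object of
 * obj_size bytes. The "_end" variant additionally permits a range that
 * ends exactly at obj_size.
 *
 *	if (range_overflows_end_t(u64, offset, size, obj_size))
 *		return -EINVAL;
 */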

/* Note we don't consider sign bits :| */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))

static inline bool
__check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
{
	size_t sz;

	if (check_mul_overflow(count, arr, &sz))
		return false;

	if (check_add_overflow(sz, base, &sz))
		return false;

	*size = sz;
	return true;
}

/**
 * check_struct_size() - Calculate size of structure with trailing array.
 * @p: Pointer to the structure.
 * @member: Name of the array member.
 * @n: Number of elements in the array.
 * @sz: Total size of structure and array.
 *
 * Calculates the size of memory needed for structure @p followed by an
 * array of @n @member elements, like struct_size(), but reports
 * whether the calculation overflowed and returns the resultant size in @sz.
 *
 * Return: false if the calculation overflowed.
 */
#define check_struct_size(p, member, n, sz) \
	likely(__check_struct_size(sizeof(*(p)), \
				   sizeof(*(p)->member) + __must_be_array((p)->member), \
				   n, sz))
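
/*
 * Editor's illustrative sketch (not part of the original header): sizing an
 * allocation for a struct with a trailing flexible array. The struct and
 * variable names are hypothetical.
 *
 *	struct table { unsigned int count; u64 entry[]; };
 *	struct table *t;
 *	size_t size;
 *
 *	if (!check_struct_size(t, entry, n, &size))
 *		return -E2BIG;	// header + n * sizeof(u64) overflowed
 *	t = kmalloc(size, GFP_KERNEL);
 */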

#define ptr_mask_bits(ptr, n) ({					\
	unsigned long __v = (unsigned long)(ptr);			\
	(typeof(ptr))(__v & -BIT(n));					\
})

#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))

#define ptr_unpack_bits(ptr, bits, n) ({				\
	unsigned long __v = (unsigned long)(ptr);			\
	*(bits) = __v & (BIT(n) - 1);					\
	(typeof(ptr))(__v & -BIT(n));					\
})

#define ptr_pack_bits(ptr, bits, n) ({					\
	unsigned long __bits = (bits);					\
	GEM_BUG_ON(__bits & -BIT(n));					\
	((typeof(ptr))((unsigned long)(ptr) | __bits));			\
})

#define ptr_dec(ptr) ({							\
	unsigned long __v = (unsigned long)(ptr);			\
	(typeof(ptr))(__v - 1);						\
})

#define ptr_inc(ptr) ({							\
	unsigned long __v = (unsigned long)(ptr);			\
	(typeof(ptr))(__v + 1);						\
})

#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
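
/*
 * Editor's illustrative sketch (not part of the original header): stashing a
 * small flag in the low bits of a page-aligned pointer and recovering both
 * later. All names below are hypothetical; the pointer must be page-aligned
 * so the low PAGE_SHIFT bits are free.
 *
 *	void *p = ...;		// page-aligned address
 *	unsigned long flags;
 *
 *	p = page_pack_bits(p, 0x1);		// store flag 0x1 in the low bits
 *	...
 *	p = page_unpack_bits(p, &flags);	// flags == 0x1, p restored
 */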

#define struct_member(T, member) (((T *)0)->member)

#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)

#define fetch_and_zero(ptr) ({						\
	typeof(*ptr) __T = *(ptr);					\
	*(ptr) = (typeof(*ptr))0;					\
	__T;								\
})
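
/*
 * Editor's illustrative sketch (not part of the original header): taking
 * ownership of a pointer while clearing the slot it came from, e.g. when
 * tearing down a cached object. "cache" is a hypothetical field.
 *
 *	struct object *obj = fetch_and_zero(&dev->cache);
 *	if (obj)
 *		object_put(obj);	// the slot is already NULL
 */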

static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
{
	return a - b;
}

/*
 * container_of_user: Extract the superclass from a pointer to a member.
 *
 * Exactly like container_of() with the exception that it plays nicely
 * with sparse for __user @ptr.
 */
#define container_of_user(ptr, type, member) ({				\
	void __user *__mptr = (void __user *)(ptr);			\
	BUILD_BUG_ON_MSG(!__same_type(*(ptr), struct_member(type, member)) && \
			 !__same_type(*(ptr), void),			\
			 "pointer type mismatch in container_of()");	\
	((type __user *)(__mptr - offsetof(type, member))); })
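
/*
 * Editor's illustrative sketch (not part of the original header): given a
 * __user pointer to an embedded member of a uAPI struct, recover the
 * enclosing struct without tripping sparse's address-space checks. The
 * struct and names are hypothetical.
 *
 *	struct ext { u64 flags; u64 next; };
 *	u64 __user *next = ...;	// points at some ext->next
 *	struct ext __user *e = container_of_user(next, struct ext, next);
 */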

/*
 * check_user_mbz: Check that a user value exists and is zero
 *
 * Frequently in our uABI we reserve space for future extensions, and
 * to ensure that userspace is prepared we enforce that space must
 * be zero. (Then any future extension can safely assume a default value
 * of 0.)
 *
 * check_user_mbz() combines checking that the user pointer is accessible
 * and that the contained value is zero.
 *
 * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
 */
#define check_user_mbz(U) ({						\
	typeof(*(U)) mbz__;						\
	get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0;		\
})
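
/*
 * Editor's illustrative sketch (not part of the original header): rejecting
 * an ioctl argument whose reserved ("must be zero") field is set. The
 * struct layout is hypothetical.
 *
 *	struct args __user *arg = ...;	// struct args { ...; u64 rsvd; };
 *	int err = check_user_mbz(&arg->rsvd);
 *	if (err)
 *		return err;	// -EFAULT or -EINVAL
 */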

static inline u64 ptr_to_u64(const void *ptr)
{
	return (uintptr_t)ptr;
}

#define u64_to_ptr(T, x) ({						\
	typecheck(u64, x);						\
	(T *)(uintptr_t)(x);						\
})

#define __mask_next_bit(mask) ({					\
	int __idx = ffs(mask) - 1;					\
	mask &= ~BIT(__idx);						\
	__idx;								\
})
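
/*
 * Editor's illustrative sketch (not part of the original header): walking
 * every set bit in a mask, lowest first. __mask_next_bit() clears each bit
 * as it returns its index, so the loop terminates when the mask is empty.
 *
 *	unsigned int mask = 0b10110;
 *	while (mask) {
 *		int bit = __mask_next_bit(mask);
 *		// visits bit == 1, 2, 4
 *	}
 */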

static inline bool is_power_of_2_u64(u64 n)
{
	return (n != 0 && ((n & (n - 1)) == 0));
}

static inline void __list_del_many(struct list_head *head,
				   struct list_head *first)
{
	first->prev = head;
	WRITE_ONCE(head->next, first);
}

static inline int list_is_last_rcu(const struct list_head *list,
				   const struct list_head *head)
{
	return READ_ONCE(list->next) == head;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
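
/*
 * Editor's illustrative sketch (not part of the original header): enforcing a
 * minimum delay between powering a panel off and back on, without sleeping
 * when enough wall time has already passed. "panel_off_jiffies" is a
 * hypothetical field recording when event A happened.
 *
 *	dev->panel_off_jiffies = jiffies;	// event A
 *	...
 *	// just before event B: sleep only for the time still remaining
 *	wait_remaining_ms_from_jiffies(dev->panel_off_jiffies, 500);
 */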

/**
 * __wait_for - magic wait macro
 *
 * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
 * important that we check the condition again after having timed out, since the
 * timeout could be due to preemption or similar and we've never had a chance to
 * check the condition before the timeout.
 */
#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
	long wait__ = (Wmin); /* recommended min for usleep is 10 us */	\
	int ret__;							\
	might_sleep();							\
	for (;;) {							\
		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
		OP;							\
		/* Guarantee COND check prior to timeout */		\
		barrier();						\
		if (COND) {						\
			ret__ = 0;					\
			break;						\
		}							\
		if (expired__) {					\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		usleep_range(wait__, wait__ * 2);			\
		if (wait__ < (Wmax))					\
			wait__ <<= 1;					\
	}								\
	ret__;								\
})

#define _wait_for(COND, US, Wmin, Wmax)	__wait_for(, (COND), (US), (Wmin), \
						   (Wmax))
#define wait_for(COND, MS)		_wait_for((COND), (MS) * 1000, 10, 1000)
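
/*
 * Editor's illustrative sketch (not part of the original header): polling a
 * hardware status register until an ack bit appears, giving up after 10 ms.
 * STATUS_REG and ACK_BIT are hypothetical names.
 *
 *	int err = wait_for(intel_uncore_read(uncore, STATUS_REG) & ACK_BIT, 10);
 *	if (err)
 *		return err;	// -ETIMEDOUT
 */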

/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif

#define _wait_for_atomic(COND, US, ATOMIC) \
({ \
	int cpu, ret, timeout = (US) * 1000; \
	u64 base; \
	_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
	if (!(ATOMIC)) { \
		preempt_disable(); \
		cpu = smp_processor_id(); \
	} \
	base = local_clock(); \
	for (;;) { \
		u64 now = local_clock(); \
		if (!(ATOMIC)) \
			preempt_enable(); \
		/* Guarantee COND check prior to timeout */ \
		barrier(); \
		if (COND) { \
			ret = 0; \
			break; \
		} \
		if (now - base >= timeout) { \
			ret = -ETIMEDOUT; \
			break; \
		} \
		cpu_relax(); \
		if (!(ATOMIC)) { \
			preempt_disable(); \
			if (unlikely(cpu != smp_processor_id())) { \
				timeout -= now - base; \
				cpu = smp_processor_id(); \
				base = local_clock(); \
			} \
		} \
	} \
	ret; \
})

#define wait_for_us(COND, US) \
({ \
	int ret__; \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	if ((US) > 10) \
		ret__ = _wait_for((COND), (US), 10, 10); \
	else \
		ret__ = _wait_for_atomic((COND), (US), 0); \
	ret__; \
})

#define wait_for_atomic_us(COND, US) \
({ \
	BUILD_BUG_ON(!__builtin_constant_p(US)); \
	BUILD_BUG_ON((US) > 50000); \
	_wait_for_atomic((COND), (US), 1); \
})

#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
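/*
 * Editor's illustrative sketch (not part of the original header): the atomic
 * variants busy-wait instead of calling usleep_range(), so they are suited to
 * short spins under a spinlock or in IRQ context. STATUS_REG and ACK_BIT are
 * hypothetical names.
 *
 *	// caller holds a spinlock; spin for at most 100 us
 *	int err = wait_for_atomic_us(intel_uncore_read_fw(uncore, STATUS_REG) & ACK_BIT, 100);
 */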

#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))

#define KBps(x) (1000 * (x))
#define MBps(x) KBps(1000 * (x))
#define GBps(x) ((u64)1000 * MBps((x)))

void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
static inline void __add_taint_for_CI(unsigned int taint)
{
	/*
	 * The system is "ok", just about surviving for the user, but
	 * CI results are now unreliable as the HW is very suspect.
	 * CI checks the taint state after every test and will reboot
	 * the machine if the kernel is tainted.
	 */
	add_taint(taint, LOCKDEP_STILL_OK);
}

void cancel_timer(struct timer_list *t);
void set_timer_ms(struct timer_list *t, unsigned long timeout);

static inline bool timer_active(const struct timer_list *t)
{
	return READ_ONCE(t->expires);
}

static inline bool timer_expired(const struct timer_list *t)
{
	return timer_active(t) && !timer_pending(t);
}

static inline bool i915_run_as_guest(void)
{
#if IS_ENABLED(CONFIG_X86)
	return !hypervisor_is_type(X86_HYPER_NATIVE);
#else
	/* Not supported yet */
	return false;
#endif
}

bool i915_vtd_active(struct drm_i915_private *i915);

#endif /* !__I915_UTILS_H */