cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_pcode.c (6316B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2013-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_pcode.h"

static int gen6_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

static int gen7_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
		return -ENXIO;
	case GEN11_PCODE_LOCKED:
		return -EBUSY;
	case GEN11_PCODE_REJECTED:
		return -EACCES;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox,
			  u32 *val, u32 *val1,
			  int fast_timeout_us, int slow_timeout_ms,
			  bool is_read)
{
	struct intel_uncore *uncore = &i915->uncore;

	lockdep_assert_held(&i915->sb_lock);

	/*
	 * GEN6_PCODE_* are outside of the forcewake domain, so we can use
	 * the intel_uncore_read/write_fw variants to reduce the amount of
	 * work required when reading/writing.
	 */

	if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore,
			      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 fast_timeout_us,
					 slow_timeout_ms,
					 &mbox))
		return -ETIMEDOUT;

	if (is_read)
		*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
	if (is_read && val1)
		*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

	if (GRAPHICS_VER(i915) > 6)
		return gen7_check_mailbox_status(mbox);
	else
		return gen6_check_mailbox_status(mbox);
}
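
/*
 * Note (not part of the original file): the helper above implements the
 * GEN6+ pcode mailbox handshake: bail out with -EAGAIN if GEN6_PCODE_READY
 * is still set from a previous request, write the data dword(s), latch the
 * command by setting GEN6_PCODE_READY together with the mailbox ID, wait for
 * the hardware to clear GEN6_PCODE_READY, optionally read the data back, and
 * finally translate the error bits into an errno via the gen6/gen7 helpers.
 */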

int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&i915->sb_lock);
	err = __snb_pcode_rw(i915, mbox, val, val1, 500, 20, true);
	mutex_unlock(&i915->sb_lock);

	if (err) {
		drm_dbg(&i915->drm,
			"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
			mbox, __builtin_return_address(0), err);
	}

	return err;
}
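
/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * a caller might read a value through the pcode mailbox. The mailbox ID
 * GEN6_READ_OC_PARAMS is assumed to be defined in i915_reg.h; a real caller
 * would pick the mailbox/command appropriate for its platform.
 */
#if 0
static void example_read_oc_params(struct drm_i915_private *i915)
{
	u32 params = 0;

	/* The second data dword is not needed here, so pass NULL for val1. */
	if (snb_pcode_read(i915, GEN6_READ_OC_PARAMS, &params, NULL) == 0)
		drm_dbg(&i915->drm, "OC params: 0x%08x\n", params);
}
#endif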

int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val,
			    int fast_timeout_us, int slow_timeout_ms)
{
	int err;

	mutex_lock(&i915->sb_lock);
	err = __snb_pcode_rw(i915, mbox, &val, NULL,
			     fast_timeout_us, slow_timeout_ms, false);
	mutex_unlock(&i915->sb_lock);

	if (err) {
		drm_dbg(&i915->drm,
			"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
			val, mbox, __builtin_return_address(0), err);
	}

	return err;
}
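
/*
 * Illustrative only (not part of the original file): most writers do not need
 * custom timeouts. In this tree the header intel_pcode.h is expected to
 * provide a convenience wrapper along these lines (name and default timeouts
 * are assumptions, not shown in this file):
 *
 *	#define snb_pcode_write(i915, mbox, val) \
 *		snb_pcode_write_timeout(i915, mbox, val, 500, 0)
 *
 * i.e. a 500 us busy-wait with no additional sleeping timeout.
 */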

static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	*status = __snb_pcode_rw(i915, mbox, &request, NULL, 500, 0, true);

	return (*status == 0) && ((request & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @i915: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error, or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms; if that times out, it is retried for another 50 ms
 * with preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, or <0 in case of
 * some other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	mutex_lock(&i915->sb_lock);

#define COND \
	skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delay
	 * the request completion.
	 */
	drm_dbg_kms(&i915->drm,
		    "PCODE timeout, retrying with preemption disabled\n");
	drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	mutex_unlock(&i915->sb_lock);
	return status ? status : ret;
#undef COND
}
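
/*
 * Illustrative only (not part of the original file): a sketch of the typical
 * skl_pcode_request() usage pattern, modelled on how display code asks PCODE
 * to prepare for a CDCLK change. SKL_PCODE_CDCLK_CONTROL,
 * SKL_CDCLK_PREPARE_FOR_CHANGE and SKL_CDCLK_READY_FOR_CHANGE are assumed to
 * come from i915_reg.h; treat them as placeholders here.
 */
#if 0
static int example_prepare_cdclk_change(struct drm_i915_private *i915)
{
	/*
	 * Resend the request until the reply dword, masked with
	 * SKL_CDCLK_READY_FOR_CHANGE, reads back as "ready", polling for up
	 * to 3 ms with preemption enabled (plus the 50 ms atomic fallback).
	 */
	return skl_pcode_request(i915, SKL_PCODE_CDCLK_CONTROL,
				 SKL_CDCLK_PREPARE_FOR_CHANGE,
				 SKL_CDCLK_READY_FOR_CHANGE,
				 SKL_CDCLK_READY_FOR_CHANGE, 3);
}
#endif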

int intel_pcode_init(struct drm_i915_private *i915)
{
	int ret = 0;

	if (!IS_DGFX(i915))
		return ret;

	ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
				DG1_UNCORE_GET_INIT_STATUS,
				DG1_UNCORE_INIT_STATUS_COMPLETE,
				DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);

	drm_dbg(&i915->drm, "PCODE init status %d\n", ret);

	if (ret)
		drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");

	return ret;
}