cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_guc.h (13256B)


/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include <linux/delay.h>
#include <linux/iosys-map.h>
#include <linux/xarray.h>

#include "intel_guc_ct.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
#include "intel_uncore.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;
struct intel_guc_state_capture;

/**
 * struct intel_guc - Top level structure of GuC.
 *
 * It handles firmware loading and manages the client pool. intel_guc owns an
 * i915_sched_engine for submission.
 */
struct intel_guc {
	/** @fw: the GuC firmware */
	struct intel_uc_fw fw;
	/** @log: sub-structure containing GuC log related data and objects */
	struct intel_guc_log log;
	/** @ct: the command transport communication channel */
	struct intel_guc_ct ct;
	/** @slpc: sub-structure containing SLPC related data and objects */
	struct intel_guc_slpc slpc;
	/** @capture: the error-state-capture module's data and objects */
	struct intel_guc_state_capture *capture;

	/** @sched_engine: Global engine used to submit requests to GuC */
	struct i915_sched_engine *sched_engine;
	/**
	 * @stalled_request: if GuC can't process a request for any reason, we
	 * save it until GuC restarts processing. No other request can be
	 * submitted until the stalled request is processed.
	 */
	struct i915_request *stalled_request;
	/**
	 * @submission_stall_reason: reason why submission is stalled
	 */
	enum {
		STALL_NONE,
		STALL_REGISTER_CONTEXT,
		STALL_MOVE_LRC_TAIL,
		STALL_ADD_REQUEST,
	} submission_stall_reason;

	/* intel_guc_recv interrupt related state */
	/** @irq_lock: protects GuC irq state */
	spinlock_t irq_lock;
	/**
	 * @msg_enabled_mask: mask of events that are processed when receiving
	 * an INTEL_GUC_ACTION_DEFAULT G2H message.
	 */
	unsigned int msg_enabled_mask;

	/**
	 * @outstanding_submission_g2h: number of outstanding GuC to Host
	 * responses related to GuC submission, used to determine if the GT is
	 * idle
	 */
	atomic_t outstanding_submission_g2h;

	/** @interrupts: pointers to GuC interrupt-managing functions. */
	struct {
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/**
	 * @submission_state: sub-structure for submission state protected by
	 * single lock
	 */
	struct {
		/**
		 * @lock: protects everything in submission_state,
		 * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
		 * out of zero
		 */
		spinlock_t lock;
		/**
		 * @guc_ids: used to allocate new guc_ids, single-lrc
		 */
		struct ida guc_ids;
		/**
		 * @num_guc_ids: Number of guc_ids; a selftest feature allows
		 * reducing this number while testing.
		 */
		int num_guc_ids;
		/**
		 * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
		 */
		unsigned long *guc_ids_bitmap;
		/**
		 * @guc_id_list: list of intel_context with valid guc_ids but no
		 * refs
		 */
		struct list_head guc_id_list;
		/**
		 * @destroyed_contexts: list of contexts waiting to be destroyed
		 * (deregistered with the GuC)
		 */
		struct list_head destroyed_contexts;
		/**
		 * @destroyed_worker: worker to deregister contexts; needed
		 * because we must take a GT PM reference and can't do so from
		 * the destroy function, as it might run in an atomic context
		 * (no sleeping)
		 */
		struct work_struct destroyed_worker;
		/**
		 * @reset_fail_worker: worker to trigger a GT reset after an
		 * engine reset fails
		 */
		struct work_struct reset_fail_worker;
		/**
		 * @reset_fail_mask: mask of engines that failed to reset
		 */
		intel_engine_mask_t reset_fail_mask;
	} submission_state;

	/**
	 * @submission_supported: tracks whether we support GuC submission on
	 * the current platform
	 */
	bool submission_supported;
	/** @submission_selected: tracks whether the user enabled GuC submission */
	bool submission_selected;
	/** @submission_initialized: tracks whether GuC submission has been initialised */
	bool submission_initialized;
	/**
	 * @rc_supported: tracks whether we support GuC rc on the current platform
	 */
	bool rc_supported;
	/** @rc_selected: tracks whether the user enabled GuC rc */
	bool rc_selected;

	/** @ads_vma: object allocated to hold the GuC ADS */
	struct i915_vma *ads_vma;
	/** @ads_map: contents of the GuC ADS */
	struct iosys_map ads_map;
	/** @ads_regset_size: size of the save/restore regsets in the ADS */
	u32 ads_regset_size;
	/**
	 * @ads_regset_count: number of save/restore registers in the ADS for
	 * each engine
	 */
	u32 ads_regset_count[I915_NUM_ENGINES];
	/** @ads_regset: save/restore regsets in the ADS */
	struct guc_mmio_reg *ads_regset;
	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
	u32 ads_golden_ctxt_size;
	/** @ads_capture_size: size of register lists in the ADS used for error capture */
	u32 ads_capture_size;
	/** @ads_engine_usage_size: size of engine usage in the ADS */
	u32 ads_engine_usage_size;

	/**
	 * @context_lookup: used to resolve intel_context from guc_id; if a
	 * context is present in this structure, it is registered with the GuC
	 */
	struct xarray context_lookup;

	/** @params: Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/** @notify_reg: register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/**
	 * @mmio_msg: notification bitmask that the GuC writes in one of its
	 * registers when the CT channel is disabled, to be processed when the
	 * channel is back up.
	 */
	u32 mmio_msg;

	/** @send_mutex: used to serialize the intel_guc_send actions */
	struct mutex send_mutex;

	/**
	 * @timestamp: GT timestamp object that stores a copy of the timestamp
	 * and adjusts it for overflow using a worker.
	 */
	struct {
		/**
		 * @lock: Lock protecting the below fields and the engine stats.
		 */
		spinlock_t lock;

		/**
		 * @gt_stamp: 64 bit extended value of the GT timestamp.
		 */
		u64 gt_stamp;

		/**
		 * @ping_delay: Period for polling the GT timestamp for
		 * overflow.
		 */
		unsigned long ping_delay;

		/**
		 * @work: Periodic work to adjust GT timestamp, engine and
		 * context usage for overflows.
		 */
		struct delayed_work work;

		/**
		 * @shift: Right shift value for the gpm timestamp
		 */
		u32 shift;
	} timestamp;

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @number_guc_id_stolen: The number of guc_ids that have been stolen
	 */
	int number_guc_id_stolen;
#endif
};

static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

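/*
 * Illustrative sketch (not part of the upstream header): log_to_guc() is the
 * usual container_of() accessor pattern, letting code that only holds the
 * embedded &struct intel_guc_log recover its parent &struct intel_guc:
 *
 *	static void example_log_handler(struct intel_guc_log *log)
 *	{
 *		struct intel_guc *guc = log_to_guc(log);
 *		...
 *	}
 */
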
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
}

static
inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
			     u32 g2h_len_dw)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
				 MAKE_SEND_FLAGS(g2h_len_dw));
}

static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size, 0);
}

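/*
 * Usage sketch for the send wrappers above, mirroring the upstream
 * intel_guc_auth_huc() implementation in intel_guc.c: an H2G request is a
 * small u32 array whose first element is the action opcode, followed by its
 * parameters.
 */
#if 0	/* example only, not compiled */
static int example_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
#endif
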
static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
					   const u32 *action,
					   u32 len,
					   u32 g2h_len_dw,
					   bool loop)
{
	int err;
	unsigned int sleep_period_ms = 1;
	bool not_atomic = !in_atomic() && !irqs_disabled();

	/*
	 * FIXME: Have the caller pass in whether we are in an atomic context,
	 * to avoid using in_atomic(). It is likely safe here, as we also
	 * check for disabled irqs (which basically all i915 spin locks
	 * disable), but regardless this should be cleaned up.
	 */

	/* No sleeping with spin locks, just busy loop */
	might_sleep_if(loop && not_atomic);

retry:
	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
	if (unlikely(err == -EBUSY && loop)) {
		if (likely(not_atomic)) {
			if (msleep_interruptible(sleep_period_ms))
				return -EINTR;
			sleep_period_ms = sleep_period_ms << 1;
		} else {
			cpu_relax();
		}
		goto retry;
	}

	return err;
}

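/*
 * Usage sketch (illustrative): submission paths typically call this helper
 * with loop = true and a G2H_LEN_DW_* constant from intel_guc_fwif.h that
 * reserves credit for the expected GuC-to-host reply, e.g.:
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
 *		guc_id,
 *	};
 *
 *	intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
 *				 G2H_LEN_DW_DEREGISTER_CONTEXT, true);
 *
 * On -EBUSY the helper retries, doubling its sleep period each iteration
 * when sleeping is allowed and busy-waiting with cpu_relax() otherwise.
 */
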
static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	intel_guc_ct_event_handler(&guc->ct);
}

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude [0, ggtt.pin_bias) address space from
 * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}

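/*
 * Worked example (illustrative, assuming a 2 MiB WOPCM-derived pin bias):
 * valid GuC-visible offsets then fall in [0x200000, GUC_GGTT_TOP), and a
 * VMA whose offset + size reaches past GUC_GGTT_TOP (0xFEE00000) trips the
 * second GEM_BUG_ON() above.
 */
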
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_late(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);
int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);

static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}

static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_disable_interrupts(guc);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}

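/*
 * A plausible reading of the ordering above (annotation, not upstream text):
 * interrupts are disabled before the CT channel is sanitized so that no G2H
 * handling races the teardown, and @mmio_msg is cleared last since any
 * pending notification is stale once the firmware state is discarded.
 */
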
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}

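/*
 * Usage sketch (illustrative): the mask bits are the INTEL_GUC_RECV_MSG_*
 * values from intel_guc_fwif.h that intel_guc_to_host_process_recv_msg()
 * filters against @msg_enabled_mask, e.g.:
 *
 *	intel_guc_enable_msg(guc, INTEL_GUC_RECV_MSG_EXCEPTION |
 *				  INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED);
 */
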
int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg, u32 len);
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
				     const u32 *msg, u32 len);
int intel_guc_context_reset_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
					 const u32 *msg, u32 len);
int intel_guc_error_capture_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);

struct intel_engine_cs *
intel_guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance);

void intel_guc_find_hung_context(struct intel_engine_cs *engine);

int intel_guc_global_policies_update(struct intel_guc *guc);

void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);

void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, intel_engine_mask_t stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

void intel_guc_write_barrier(struct intel_guc *guc);

#endif