cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_guc_log.c (20785B)


// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include <linux/debugfs.h>
#include <linux/string_helpers.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "intel_guc_capture.h"
#include "intel_guc_log.h"

static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log);

/**
 * DOC: GuC firmware log
 *
 * The firmware log is enabled by setting i915.guc_log_level to a positive
 * level. Log data is printed out by reading the debugfs file
 * i915_guc_log_dump. Reading from i915_guc_load_status will print out the
 * firmware loading status and scratch register values.
 */
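
/*
 * Example usage sketch (the module parameter is i915.guc_log_level as
 * documented above; the debugfs location of i915_guc_log_dump varies by
 * kernel version and card, /sys/kernel/debug/dri/0 is only illustrative):
 *
 *   echo 2 > /sys/module/i915/parameters/guc_log_level
 *   cat /sys/kernel/debug/dri/0/i915_guc_log_dump
 */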

static int guc_action_flush_log_complete(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
		GUC_DEBUG_LOG_BUFFER
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_flush_log(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
		0
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

static int guc_action_control_log(struct intel_guc *guc, bool enable,
				  bool default_logging, u32 verbosity)
{
	u32 action[] = {
		INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
		(enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) |
		(verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) |
		(default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0)
	};

	GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX);

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/*
 * Sub buffer switch callback. Called whenever relay has to switch to a new
 * sub buffer; relay stays on the same sub buffer if 0 is returned.
 */
static int subbuf_start_callback(struct rchan_buf *buf,
				 void *subbuf,
				 void *prev_subbuf,
				 size_t prev_padding)
{
	/*
	 * Use no-overwrite mode by default, where relay will stop accepting
	 * new data if there are no empty sub buffers left.
	 * There is no strict synchronization enforced by relay between the
	 * consumer and the producer. In overwrite mode there is a possibility
	 * of getting inconsistent/garbled data, as the producer could be
	 * writing onto the same sub buffer from which the consumer is
	 * reading. This can't be avoided unless the consumer is fast enough
	 * and can always run in tandem with the producer.
	 */
	if (relay_buf_full(buf))
		return 0;

	return 1;
}

/*
 * file_create() callback. Creates the relay file in debugfs.
 */
static struct dentry *create_buf_file_callback(const char *filename,
					       struct dentry *parent,
					       umode_t mode,
					       struct rchan_buf *buf,
					       int *is_global)
{
	struct dentry *buf_file;

	/*
	 * This is to enable the use of a single buffer for the relay channel
	 * and correspondingly have a single file exposed to the user, through
	 * which it can collect the logs in order without any post-processing.
	 * 'is_global' needs to be set even if parent is NULL for early logging.
	 */
	*is_global = 1;

	if (!parent)
		return NULL;

	buf_file = debugfs_create_file(filename, mode,
				       parent, buf, &relay_file_operations);
	if (IS_ERR(buf_file))
		return NULL;

	return buf_file;
}

/*
 * file_remove() default callback. Removes relay file in debugfs.
 */
static int remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

/* relay channel callbacks */
static const struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_callback,
	.create_buf_file = create_buf_file_callback,
	.remove_buf_file = remove_buf_file_callback,
};

static void guc_move_to_next_buf(struct intel_guc_log *log)
{
	/*
	 * Make sure the updates made in the sub buffer are visible when
	 * Consumer sees the following update to offset inside the sub buffer.
	 */
	smp_wmb();

	/* All data has been written, so now move the offset of sub buffer. */
	relay_reserve(log->relay.channel, log->vma->obj->base.size - CAPTURE_BUFFER_SIZE);

	/* Switch to the next sub buffer */
	relay_flush(log->relay.channel);
}

static void *guc_get_write_buffer(struct intel_guc_log *log)
{
	/*
	 * Just get the base address of a new sub buffer and copy data into it
	 * ourselves. NULL will be returned in no-overwrite mode, if all sub
	 * buffers are full. Could have used relay_write() to indirectly copy
	 * the data, but that would have been a bit convoluted, as we need to
	 * write to only certain locations inside a sub buffer, which cannot
	 * be done without using relay_reserve() along with relay_write(). So
	 * it's better to use relay_reserve() alone.
	 */
	return relay_reserve(log->relay.channel, 0);
}

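/*
 * buffer_full_cnt in the shared log buffer state is only a 4-bit
 * counter, so the helper below keeps its own monotonically increasing
 * sampled_overflow count, adding 16 whenever the shared counter wraps.
 */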
bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log,
				      enum guc_log_buffer_type type,
				      unsigned int full_cnt)
{
	unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
	bool overflow = false;

	if (full_cnt != prev_full_cnt) {
		overflow = true;

		log->stats[type].overflow = full_cnt;
		log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;

		if (full_cnt < prev_full_cnt) {
			/* buffer_full_cnt is a 4-bit counter */
			log->stats[type].sampled_overflow += 16;
		}

		dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
				       "GuC log buffer overflow\n");
	}

	return overflow;
}

unsigned int intel_guc_get_log_buffer_size(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_DEBUG_LOG_BUFFER:
		return DEBUG_BUFFER_SIZE;
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return CRASH_BUFFER_SIZE;
	case GUC_CAPTURE_LOG_BUFFER:
		return CAPTURE_BUFFER_SIZE;
	default:
		MISSING_CASE(type);
	}

	return 0;
}

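/*
 * The shared log buffer starts with one page of guc_log_buffer_state
 * headers; each log type's data region then follows in enum order (see
 * the layout diagram in intel_guc_log_create() below), so the offset of
 * a given type is PAGE_SIZE plus the sizes of all preceding buffers.
 */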
size_t intel_guc_get_log_buffer_offset(enum guc_log_buffer_type type)
{
	enum guc_log_buffer_type i;
	size_t offset = PAGE_SIZE; /* for the log_buffer_states */

	for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) {
		if (i == type)
			break;
		offset += intel_guc_get_log_buffer_size(i);
	}

	return offset;
}

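/*
 * Snapshot the shared GuC log buffer into the next free relay sub
 * buffer: copy the per-type state headers, advance the shared read
 * pointer to the sampled write pointer, and copy only the log data
 * written since the previous flush.
 */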
static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
	unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
	struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
	struct guc_log_buffer_state log_buf_state_local;
	enum guc_log_buffer_type type;
	void *src_data, *dst_data;
	bool new_overflow;

	mutex_lock(&log->relay.lock);

	if (WARN_ON(!intel_guc_log_relay_created(log)))
		goto out_unlock;

	/* Get the pointer to shared GuC log buffer */
	src_data = log->buf_addr;
	log_buf_state = src_data;

	/* Get the pointer to local buffer to store the logs */
	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);

	if (unlikely(!log_buf_snapshot_state)) {
		/*
		 * Rate limited to avoid a deluge of messages; logs might be
		 * getting consumed by the user at a slow rate.
		 */
		DRM_ERROR_RATELIMITED("no sub-buffer to copy general logs\n");
		log->relay.full_count++;

		goto out_unlock;
	}

	/* Actual logs are present from the 2nd page */
	src_data += PAGE_SIZE;
	dst_data += PAGE_SIZE;

	/* For relay logging, we exclude error state capture */
	for (type = GUC_DEBUG_LOG_BUFFER; type <= GUC_CRASH_DUMP_LOG_BUFFER; type++) {
		/*
		 * Make a copy of the state structure, inside the GuC log
		 * buffer (which is mapped uncached), on the stack, to avoid
		 * reading from it multiple times.
		 */
		memcpy(&log_buf_state_local, log_buf_state,
		       sizeof(struct guc_log_buffer_state));
		buffer_size = intel_guc_get_log_buffer_size(type);
		read_offset = log_buf_state_local.read_ptr;
		write_offset = log_buf_state_local.sampled_write_ptr;
		full_cnt = log_buf_state_local.buffer_full_cnt;

		/* Bookkeeping stuff */
		log->stats[type].flush += log_buf_state_local.flush_to_file;
		new_overflow = intel_guc_check_log_buf_overflow(log, type, full_cnt);

		/* Update the state of the shared log buffer */
		log_buf_state->read_ptr = write_offset;
		log_buf_state->flush_to_file = 0;
		log_buf_state++;

		/* First copy the state structure to the snapshot buffer */
		memcpy(log_buf_snapshot_state, &log_buf_state_local,
		       sizeof(struct guc_log_buffer_state));

		/*
		 * The write pointer could have been updated by GuC firmware
		 * after sending the flush interrupt to the host; for
		 * consistency, set the write pointer in the snapshot buffer
		 * to the same value as sampled_write_ptr.
		 */
		log_buf_snapshot_state->write_ptr = write_offset;
		log_buf_snapshot_state++;

		/* Now copy the actual logs. */
		if (unlikely(new_overflow)) {
			/* copy the whole buffer in case of overflow */
			read_offset = 0;
			write_offset = buffer_size;
		} else if (unlikely((read_offset > buffer_size) ||
				    (write_offset > buffer_size))) {
			DRM_ERROR("invalid log buffer state\n");
			/* copy whole buffer as offsets are unreliable */
			read_offset = 0;
			write_offset = buffer_size;
		}

		/*
		 * Just copy the newly written data. If the write pointer has
		 * wrapped around, copy the chunk at the start of the buffer
		 * (up to write_offset) first, then the tail from read_offset
		 * to the end of the buffer.
		 */
		if (read_offset > write_offset) {
			i915_memcpy_from_wc(dst_data, src_data, write_offset);
			bytes_to_copy = buffer_size - read_offset;
		} else {
			bytes_to_copy = write_offset - read_offset;
		}
		i915_memcpy_from_wc(dst_data + read_offset,
				    src_data + read_offset, bytes_to_copy);

		src_data += buffer_size;
		dst_data += buffer_size;
	}

	guc_move_to_next_buf(log);

out_unlock:
	mutex_unlock(&log->relay.lock);
}

static void copy_debug_logs_work(struct work_struct *work)
{
	struct intel_guc_log *log =
		container_of(work, struct intel_guc_log, relay.flush_work);

	guc_log_copy_debuglogs_for_relay(log);
}

static int guc_log_relay_map(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	if (!log->vma || !log->buf_addr)
		return -ENODEV;

	/*
	 * The WC vmalloc mapping of the log buffer pages was done at
	 * GuC log init time, but let's keep a ref for book-keeping.
	 */
	i915_gem_object_get(log->vma->obj);
	log->relay.buf_in_use = true;

	return 0;
}

static void guc_log_relay_unmap(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	i915_gem_object_put(log->vma->obj);
	log->relay.buf_in_use = false;
}

void intel_guc_log_init_early(struct intel_guc_log *log)
{
	mutex_init(&log->relay.lock);
	INIT_WORK(&log->relay.flush_work, copy_debug_logs_work);
	log->relay.started = false;
}

static int guc_log_relay_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	struct rchan *guc_log_relay_chan;
	size_t n_subbufs, subbuf_size;
	int ret;

	lockdep_assert_held(&log->relay.lock);
	GEM_BUG_ON(!log->vma);

	/*
	 * Keep the sub buffer size the same as the shared log buffer,
	 * but exclude the error-state-capture logs, which relay logging
	 * does not carry.
	 */
	subbuf_size = log->vma->size - CAPTURE_BUFFER_SIZE;

	/*
	 * Store up to 8 snapshots, which is large enough to buffer sufficient
	 * boot time logs and provides enough leeway to the user, in terms of
	 * latency, for consuming the logs from relay. Also doesn't take
	 * up too much memory.
	 */
	n_subbufs = 8;

	guc_log_relay_chan = relay_open("guc_log",
					dev_priv->drm.primary->debugfs_root,
					subbuf_size, n_subbufs,
					&relay_callbacks, dev_priv);
	if (!guc_log_relay_chan) {
		DRM_ERROR("Couldn't create relay chan for GuC logging\n");

		ret = -ENOMEM;
		return ret;
	}

	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
	log->relay.channel = guc_log_relay_chan;

	return 0;
}

static void guc_log_relay_destroy(struct intel_guc_log *log)
{
	lockdep_assert_held(&log->relay.lock);

	relay_close(log->relay.channel);
	log->relay.channel = NULL;
}

static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;

	_guc_log_copy_debuglogs_for_relay(log);

	/*
	 * Generally the device is expected to be active only at this
	 * time, so get/put should be really quick.
	 */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		guc_action_flush_log_complete(guc);
}

static u32 __get_default_log_level(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	/* A negative value means "use platform/config default" */
	if (i915->params.guc_log_level < 0) {
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
	}

	if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) {
		DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
			 "guc_log_level", i915->params.guc_log_level,
			 "verbosity too high");
		return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
			IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
			GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
	}

	GEM_BUG_ON(i915->params.guc_log_level < GUC_LOG_LEVEL_DISABLED);
	GEM_BUG_ON(i915->params.guc_log_level > GUC_LOG_LEVEL_MAX);
	return i915->params.guc_log_level;
}

int intel_guc_log_create(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct i915_vma *vma;
	void *vaddr;
	u32 guc_log_size;
	int ret;

	GEM_BUG_ON(log->vma);

	/*
	 *  GuC Log buffer Layout
	 * (this ordering must follow "enum guc_log_buffer_type" definition)
	 *
	 *  +===============================+ 00B
	 *  |      Debug state header       |
	 *  +-------------------------------+ 32B
	 *  |    Crash dump state header    |
	 *  +-------------------------------+ 64B
	 *  |     Capture state header      |
	 *  +-------------------------------+ 96B
	 *  |                               |
	 *  +===============================+ PAGE_SIZE (4KB)
	 *  |          Debug logs           |
	 *  +===============================+ + DEBUG_SIZE
	 *  |        Crash Dump logs        |
	 *  +===============================+ + CRASH_SIZE
	 *  |         Capture logs          |
	 *  +===============================+ + CAPTURE_SIZE
	 */
	if (intel_guc_capture_output_min_size_est(guc) > CAPTURE_BUFFER_SIZE)
		DRM_WARN("GuC log buffer for state_capture may be too small. %d < %d\n",
			 CAPTURE_BUFFER_SIZE, intel_guc_capture_output_min_size_est(guc));

	guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE +
		       CAPTURE_BUFFER_SIZE;

	vma = intel_guc_allocate_vma(guc, guc_log_size);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	log->vma = vma;
	/*
	 * Create a WC (uncached for read) vmalloc mapping up front for
	 * immediate access to data from memory during critical events such as
	 * error capture.
	 */
	vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		i915_vma_unpin_and_release(&log->vma, 0);
		goto err;
	}
	log->buf_addr = vaddr;

	log->level = __get_default_log_level(log);
	DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
			 log->level, str_enabled_disabled(log->level),
			 str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
			 GUC_LOG_LEVEL_TO_VERBOSITY(log->level));

	return 0;

err:
	DRM_ERROR("Failed to allocate or map GuC log buffer. %d\n", ret);
	return ret;
}

void intel_guc_log_destroy(struct intel_guc_log *log)
{
	log->buf_addr = NULL;
	i915_vma_unpin_and_release(&log->vma, I915_VMA_RELEASE_MAP);
}

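/*
 * Log level convention used below (see the GUC_LOG_LEVEL_* helpers):
 * 0 disables GuC logging, 1 enables non-verbose logging, and higher
 * values (up to GUC_LOG_LEVEL_MAX) select increasing firmware verbosity.
 */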
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	intel_wakeref_t wakeref;
	int ret = 0;

	BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
	GEM_BUG_ON(!log->vma);

	/*
	 * GuC recognizes log levels starting from 0 up to max; we're using 0
	 * as an indication that logging should be disabled.
	 */
	if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
		return -EINVAL;

	mutex_lock(&dev_priv->drm.struct_mutex);

	if (log->level == level)
		goto out_unlock;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		ret = guc_action_control_log(guc,
					     GUC_LOG_LEVEL_IS_VERBOSE(level),
					     GUC_LOG_LEVEL_IS_ENABLED(level),
					     GUC_LOG_LEVEL_TO_VERBOSITY(level));
	if (ret) {
		DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
		goto out_unlock;
	}

	log->level = level;

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}

bool intel_guc_log_relay_created(const struct intel_guc_log *log)
{
	return log->buf_addr;
}

int intel_guc_log_relay_open(struct intel_guc_log *log)
{
	int ret;

	if (!log->vma)
		return -ENODEV;

	mutex_lock(&log->relay.lock);

	if (intel_guc_log_relay_created(log)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	/*
	 * We require SSE 4.1 for fast reads from the GuC log buffer and
	 * it should be present on the chipsets supporting GuC based
	 * submissions.
	 */
	if (!i915_has_memcpy_from_wc()) {
		ret = -ENXIO;
		goto out_unlock;
	}

	ret = guc_log_relay_create(log);
	if (ret)
		goto out_unlock;

	ret = guc_log_relay_map(log);
	if (ret)
		goto out_relay;

	mutex_unlock(&log->relay.lock);

	return 0;

out_relay:
	guc_log_relay_destroy(log);
out_unlock:
	mutex_unlock(&log->relay.lock);

	return ret;
}

int intel_guc_log_relay_start(struct intel_guc_log *log)
{
	if (log->relay.started)
		return -EEXIST;

	/*
	 * When GuC is logging without us relaying to userspace, we're ignoring
	 * the flush notification. This means that we need to unconditionally
	 * flush on relay enabling, since GuC only notifies us once.
	 */
	queue_work(system_highpri_wq, &log->relay.flush_work);

	log->relay.started = true;

	return 0;
}

void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	intel_wakeref_t wakeref;

	if (!log->relay.started)
		return;

	/*
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete otherwise forceful flush may not actually happen.
	 */
	flush_work(&log->relay.flush_work);

	with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
		guc_action_flush_log(guc);

	/* GuC would have updated log buffer by now, so copy it */
	guc_log_copy_debuglogs_for_relay(log);
}

/*
 * Stops the relay log. Called from intel_guc_log_relay_close(), so no
 * possibility of race with start/flush since relay_write cannot race
 * relay_close.
 */
static void guc_log_relay_stop(struct intel_guc_log *log)
{
	struct intel_guc *guc = log_to_guc(log);
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	if (!log->relay.started)
		return;

	intel_synchronize_irq(i915);

	flush_work(&log->relay.flush_work);

	log->relay.started = false;
}

void intel_guc_log_relay_close(struct intel_guc_log *log)
{
	guc_log_relay_stop(log);

	mutex_lock(&log->relay.lock);
	GEM_BUG_ON(!intel_guc_log_relay_created(log));
	guc_log_relay_unmap(log);
	guc_log_relay_destroy(log);
	mutex_unlock(&log->relay.lock);
}

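/*
 * Called on a GuC log-buffer flush notification (the caller is assumed
 * to be the GuC interrupt/CT handler); the actual copy is deferred to
 * the high-priority worker.
 */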
void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
	if (log->relay.started)
		queue_work(system_highpri_wq, &log->relay.flush_work);
}

static const char *
stringify_guc_log_type(enum guc_log_buffer_type type)
{
	switch (type) {
	case GUC_DEBUG_LOG_BUFFER:
		return "DEBUG";
	case GUC_CRASH_DUMP_LOG_BUFFER:
		return "CRASH";
	case GUC_CAPTURE_LOG_BUFFER:
		return "CAPTURE";
	default:
		MISSING_CASE(type);
	}

	return "";
}

/**
 * intel_guc_log_info - dump information about GuC log relay
 * @log: the GuC log
 * @p: the &drm_printer
 *
 * Pretty printer for GuC log info
 */
void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
{
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_created(log)) {
		drm_puts(p, "GuC log relay not created\n");
		return;
	}

	drm_puts(p, "GuC logging stats:\n");

	drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);

	for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}

/**
 * intel_guc_log_dump - dump the contents of the GuC log
 * @log: the GuC log
 * @p: the &drm_printer
 * @dump_load_err: dump the log saved on GuC load error
 *
 * Pretty printer for the GuC log
 */
int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
		       bool dump_load_err)
{
	struct intel_guc *guc = log_to_guc(log);
	struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
	struct drm_i915_gem_object *obj = NULL;
	u32 *map;
	int i = 0;

	if (!intel_guc_is_supported(guc))
		return -ENODEV;

	if (dump_load_err)
		obj = uc->load_err_log;
	else if (guc->log.vma)
		obj = guc->log.vma->obj;

	if (!obj)
		return 0;

	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		DRM_DEBUG("Failed to pin object\n");
		drm_puts(p, "(log data inaccessible)\n");
		return PTR_ERR(map);
	}

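	/* Dump the mapped buffer as rows of four 32-bit words. */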
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(map + i), *(map + i + 1),
			   *(map + i + 2), *(map + i + 3));

	drm_puts(p, "\n");

	i915_gem_object_unpin_map(obj);

	return 0;
}