cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

intel_guc_ct.c (32821B)


      1// SPDX-License-Identifier: MIT
      2/*
      3 * Copyright © 2016-2019 Intel Corporation
      4 */
      5
      6#include <linux/circ_buf.h>
      7#include <linux/ktime.h>
      8#include <linux/time64.h>
      9#include <linux/string_helpers.h>
     10#include <linux/timekeeping.h>
     11
     12#include "i915_drv.h"
     13#include "intel_guc_ct.h"
     14#include "gt/intel_gt.h"
     15
     16static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
     17{
     18	return container_of(ct, struct intel_guc, ct);
     19}
     20
     21static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
     22{
     23	return guc_to_gt(ct_to_guc(ct));
     24}
     25
     26static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
     27{
     28	return ct_to_gt(ct)->i915;
     29}
     30
     31static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
     32{
     33	return &ct_to_i915(ct)->drm;
     34}
     35
     36#define CT_ERROR(_ct, _fmt, ...) \
     37	drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
     38#ifdef CONFIG_DRM_I915_DEBUG_GUC
     39#define CT_DEBUG(_ct, _fmt, ...) \
     40	drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
     41#else
     42#define CT_DEBUG(...)	do { } while (0)
     43#endif
     44#define CT_PROBE_ERROR(_ct, _fmt, ...) \
     45	i915_probe_error(ct_to_i915(_ct), "CT: " _fmt, ##__VA_ARGS__)
     46
     47/**
     48 * DOC: CTB Blob
     49 *
     50 * We allocate single blob to hold both CTB descriptors and buffers:
     51 *
     52 *      +--------+-----------------------------------------------+------+
     53 *      | offset | contents                                      | size |
     54 *      +========+===============================================+======+
     55 *      | 0x0000 | H2G `CTB Descriptor`_ (send)                  |      |
     56 *      +--------+-----------------------------------------------+  4K  |
     57 *      | 0x0800 | G2H `CTB Descriptor`_ (recv)                  |      |
     58 *      +--------+-----------------------------------------------+------+
     59 *      | 0x1000 | H2G `CT Buffer`_ (send)                       | n*4K |
     60 *      |        |                                               |      |
     61 *      +--------+-----------------------------------------------+------+
     62 *      | 0x1000 | G2H `CT Buffer`_ (recv)                       | m*4K |
     63 *      | + n*4K |                                               |      |
     64 *      +--------+-----------------------------------------------+------+
     65 *
     66 * Size of each `CT Buffer`_ must be multiple of 4K.
     67 * We don't expect too many messages in flight at any time, unless we are
     68 * using GuC submission. In that case each request requires a minimum of
     69 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
     70 * is enough space to avoid backpressure on the driver. We increase the size
     71 * of the receive buffer (relative to the send) to ensure a G2H response
     72 * CTB has a landing spot.
     73 */
     74#define CTB_DESC_SIZE		ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
     75#define CTB_H2G_BUFFER_SIZE	(SZ_4K)
     76#define CTB_G2H_BUFFER_SIZE	(4 * CTB_H2G_BUFFER_SIZE)
     77#define G2H_ROOM_BUFFER_SIZE	(CTB_G2H_BUFFER_SIZE / 4)
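       /*
        * With the sizes above, each descriptor gets its own 2K-aligned slot
        * (struct guc_ct_buffer_desc is well under 2K), the H2G buffer is 4K
        * and the G2H buffer is 16K, of which 4K (G2H_ROOM_BUFFER_SIZE) is
        * held back for unsolicited G2H messages, so intel_guc_ct_init()
        * below allocates a single 24K blob (2 * 2K + 4K + 16K).
        */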
     78
     79struct ct_request {
     80	struct list_head link;
     81	u32 fence;
     82	u32 status;
     83	u32 response_len;
     84	u32 *response_buf;
     85};
     86
     87struct ct_incoming_msg {
     88	struct list_head link;
     89	u32 size;
     90	u32 msg[];
     91};
     92
     93enum { CTB_SEND = 0, CTB_RECV = 1 };
     94
     95enum { CTB_OWNER_HOST = 0 };
     96
     97static void ct_receive_tasklet_func(struct tasklet_struct *t);
     98static void ct_incoming_request_worker_func(struct work_struct *w);
     99
    100/**
    101 * intel_guc_ct_init_early - Initialize CT state without requiring device access
    102 * @ct: pointer to CT struct
    103 */
    104void intel_guc_ct_init_early(struct intel_guc_ct *ct)
    105{
    106	spin_lock_init(&ct->ctbs.send.lock);
    107	spin_lock_init(&ct->ctbs.recv.lock);
    108	spin_lock_init(&ct->requests.lock);
    109	INIT_LIST_HEAD(&ct->requests.pending);
    110	INIT_LIST_HEAD(&ct->requests.incoming);
    111	INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
    112	tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
    113	init_waitqueue_head(&ct->wq);
    114}
    115
    116static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
    117{
    118	memset(desc, 0, sizeof(*desc));
    119}
    120
    121static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb)
    122{
    123	u32 space;
    124
    125	ctb->broken = false;
    126	ctb->tail = 0;
    127	ctb->head = 0;
    128	space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
    129	atomic_set(&ctb->space, space);
    130
    131	guc_ct_buffer_desc_init(ctb->desc);
    132}
    133
    134static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
    135			       struct guc_ct_buffer_desc *desc,
    136			       u32 *cmds, u32 size_in_bytes, u32 resv_space)
    137{
    138	GEM_BUG_ON(size_in_bytes % 4);
    139
    140	ctb->desc = desc;
    141	ctb->cmds = cmds;
    142	ctb->size = size_in_bytes / 4;
    143	ctb->resv_space = resv_space / 4;
    144
    145	guc_ct_buffer_reset(ctb);
    146}
    147
    148static int guc_action_control_ctb(struct intel_guc *guc, u32 control)
    149{
    150	u32 request[HOST2GUC_CONTROL_CTB_REQUEST_MSG_LEN] = {
    151		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
    152		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
    153		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_CONTROL_CTB),
    154		FIELD_PREP(HOST2GUC_CONTROL_CTB_REQUEST_MSG_1_CONTROL, control),
    155	};
    156	int ret;
    157
    158	GEM_BUG_ON(control != GUC_CTB_CONTROL_DISABLE && control != GUC_CTB_CONTROL_ENABLE);
    159
    160	/* CT control must go over MMIO */
    161	ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
    162
    163	return ret > 0 ? -EPROTO : ret;
    164}
    165
    166static int ct_control_enable(struct intel_guc_ct *ct, bool enable)
    167{
    168	int err;
    169
    170	err = guc_action_control_ctb(ct_to_guc(ct), enable ?
    171				     GUC_CTB_CONTROL_ENABLE : GUC_CTB_CONTROL_DISABLE);
    172	if (unlikely(err))
    173		CT_PROBE_ERROR(ct, "Failed to control/%s CTB (%pe)\n",
    174			       str_enable_disable(enable), ERR_PTR(err));
    175
    176	return err;
    177}
    178
    179static int ct_register_buffer(struct intel_guc_ct *ct, bool send,
    180			      u32 desc_addr, u32 buff_addr, u32 size)
    181{
    182	int err;
    183
    184	err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
    185				   GUC_KLV_SELF_CFG_H2G_CTB_DESCRIPTOR_ADDR_KEY :
    186				   GUC_KLV_SELF_CFG_G2H_CTB_DESCRIPTOR_ADDR_KEY,
    187				   desc_addr);
    188	if (unlikely(err))
    189		goto failed;
    190
    191	err = intel_guc_self_cfg64(ct_to_guc(ct), send ?
    192				   GUC_KLV_SELF_CFG_H2G_CTB_ADDR_KEY :
    193				   GUC_KLV_SELF_CFG_G2H_CTB_ADDR_KEY,
    194				   buff_addr);
    195	if (unlikely(err))
    196		goto failed;
    197
    198	err = intel_guc_self_cfg32(ct_to_guc(ct), send ?
    199				   GUC_KLV_SELF_CFG_H2G_CTB_SIZE_KEY :
    200				   GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY,
    201				   size);
    202	if (unlikely(err))
    203failed:
    204		CT_PROBE_ERROR(ct, "Failed to register %s buffer (%pe)\n",
    205			       send ? "SEND" : "RECV", ERR_PTR(err));
    206
    207	return err;
    208}
    209
    210/**
    211 * intel_guc_ct_init - Init buffer-based communication
    212 * @ct: pointer to CT struct
    213 *
    214 * Allocate memory required for buffer-based communication.
    215 *
    216 * Return: 0 on success, a negative errno code on failure.
    217 */
    218int intel_guc_ct_init(struct intel_guc_ct *ct)
    219{
    220	struct intel_guc *guc = ct_to_guc(ct);
    221	struct guc_ct_buffer_desc *desc;
    222	u32 blob_size;
    223	u32 cmds_size;
    224	u32 resv_space;
    225	void *blob;
    226	u32 *cmds;
    227	int err;
    228
    229	err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
    230	if (err)
    231		return err;
    232
    233	GEM_BUG_ON(ct->vma);
    234
    235	blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
    236	err = intel_guc_allocate_and_map_vma(guc, blob_size, &ct->vma, &blob);
    237	if (unlikely(err)) {
    238		CT_PROBE_ERROR(ct, "Failed to allocate %u for CTB data (%pe)\n",
    239			       blob_size, ERR_PTR(err));
    240		return err;
    241	}
    242
    243	CT_DEBUG(ct, "base=%#x size=%u\n", intel_guc_ggtt_offset(guc, ct->vma), blob_size);
    244
    245	/* store pointers to desc and cmds for send ctb */
    246	desc = blob;
    247	cmds = blob + 2 * CTB_DESC_SIZE;
    248	cmds_size = CTB_H2G_BUFFER_SIZE;
    249	resv_space = 0;
    250	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send",
    251		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
    252		 resv_space);
    253
    254	guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);
    255
    256	/* store pointers to desc and cmds for recv ctb */
    257	desc = blob + CTB_DESC_SIZE;
    258	cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
    259	cmds_size = CTB_G2H_BUFFER_SIZE;
    260	resv_space = G2H_ROOM_BUFFER_SIZE;
    261	CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv",
    262		 ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
    263		 resv_space);
    264
    265	guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);
    266
    267	return 0;
    268}
    269
    270/**
    271 * intel_guc_ct_fini - Fini buffer-based communication
    272 * @ct: pointer to CT struct
    273 *
    274 * Deallocate memory required for buffer-based communication.
    275 */
    276void intel_guc_ct_fini(struct intel_guc_ct *ct)
    277{
    278	GEM_BUG_ON(ct->enabled);
    279
    280	tasklet_kill(&ct->receive_tasklet);
    281	i915_vma_unpin_and_release(&ct->vma, I915_VMA_RELEASE_MAP);
    282	memset(ct, 0, sizeof(*ct));
    283}
    284
    285/**
    286 * intel_guc_ct_enable - Enable buffer based command transport.
    287 * @ct: pointer to CT struct
    288 *
    289 * Return: 0 on success, a negative errno code on failure.
    290 */
    291int intel_guc_ct_enable(struct intel_guc_ct *ct)
    292{
    293	struct intel_guc *guc = ct_to_guc(ct);
    294	u32 base, desc, cmds, size;
    295	void *blob;
    296	int err;
    297
    298	GEM_BUG_ON(ct->enabled);
    299
    300	/* vma should be already allocated and mapped */
    301	GEM_BUG_ON(!ct->vma);
    302	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(ct->vma->obj));
    303	base = intel_guc_ggtt_offset(guc, ct->vma);
    304
    305	/* blob should start with send descriptor */
    306	blob = __px_vaddr(ct->vma->obj);
    307	GEM_BUG_ON(blob != ct->ctbs.send.desc);
    308
    309	/* (re)initialize descriptors */
    310	guc_ct_buffer_reset(&ct->ctbs.send);
    311	guc_ct_buffer_reset(&ct->ctbs.recv);
    312
    313	/*
    314	 * Register both CT buffers starting with RECV buffer.
    315	 * Descriptors are in first half of the blob.
    316	 */
    317	desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
    318	cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
    319	size = ct->ctbs.recv.size * 4;
    320	err = ct_register_buffer(ct, false, desc, cmds, size);
    321	if (unlikely(err))
    322		goto err_out;
    323
    324	desc = base + ptrdiff(ct->ctbs.send.desc, blob);
    325	cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
    326	size = ct->ctbs.send.size * 4;
    327	err = ct_register_buffer(ct, true, desc, cmds, size);
    328	if (unlikely(err))
    329		goto err_out;
    330
    331	err = ct_control_enable(ct, true);
    332	if (unlikely(err))
    333		goto err_out;
    334
    335	ct->enabled = true;
    336	ct->stall_time = KTIME_MAX;
    337
    338	return 0;
    339
    340err_out:
    341	CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
    342	return err;
    343}
    344
    345/**
    346 * intel_guc_ct_disable - Disable buffer based command transport.
    347 * @ct: pointer to CT struct
    348 */
    349void intel_guc_ct_disable(struct intel_guc_ct *ct)
    350{
    351	struct intel_guc *guc = ct_to_guc(ct);
    352
    353	GEM_BUG_ON(!ct->enabled);
    354
    355	ct->enabled = false;
    356
    357	if (intel_guc_is_fw_running(guc)) {
    358		ct_control_enable(ct, false);
    359	}
    360}
    361
    362static u32 ct_get_next_fence(struct intel_guc_ct *ct)
    363{
    364	/* For now it's trivial */
    365	return ++ct->requests.last_fence;
    366}
    367
    368static int ct_write(struct intel_guc_ct *ct,
    369		    const u32 *action,
    370		    u32 len /* in dwords */,
    371		    u32 fence, u32 flags)
    372{
    373	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
    374	struct guc_ct_buffer_desc *desc = ctb->desc;
    375	u32 tail = ctb->tail;
    376	u32 size = ctb->size;
    377	u32 header;
    378	u32 hxg;
    379	u32 type;
    380	u32 *cmds = ctb->cmds;
    381	unsigned int i;
    382
    383	if (unlikely(desc->status))
    384		goto corrupted;
    385
    386	GEM_BUG_ON(tail > size);
    387
    388#ifdef CONFIG_DRM_I915_DEBUG_GUC
    389	if (unlikely(tail != READ_ONCE(desc->tail))) {
    390		CT_ERROR(ct, "Tail was modified %u != %u\n",
    391			 desc->tail, tail);
    392		desc->status |= GUC_CTB_STATUS_MISMATCH;
    393		goto corrupted;
    394	}
    395	if (unlikely(READ_ONCE(desc->head) >= size)) {
    396		CT_ERROR(ct, "Invalid head offset %u >= %u\n",
    397			 desc->head, size);
    398		desc->status |= GUC_CTB_STATUS_OVERFLOW;
    399		goto corrupted;
    400	}
    401#endif
    402
    403	/*
    404	 * dw0: CT header (including fence)
    405	 * dw1: HXG header (including action code)
    406	 * dw2+: action data
    407	 */
    408	header = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
    409		 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
    410		 FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence);
    411
    412	type = (flags & INTEL_GUC_CT_SEND_NB) ? GUC_HXG_TYPE_EVENT :
    413		GUC_HXG_TYPE_REQUEST;
    414	hxg = FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |
    415		FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
    416			   GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
    417
    418	CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n",
    419		 tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]);
    420
    421	cmds[tail] = header;
    422	tail = (tail + 1) % size;
    423
    424	cmds[tail] = hxg;
    425	tail = (tail + 1) % size;
    426
    427	for (i = 1; i < len; i++) {
    428		cmds[tail] = action[i];
    429		tail = (tail + 1) % size;
    430	}
    431	GEM_BUG_ON(tail > size);
    432
    433	/*
    434	 * make sure H2G buffer update and LRC tail update (if this is triggering a
    435	 * submission) are visible before updating the descriptor tail
    436	 */
    437	intel_guc_write_barrier(ct_to_guc(ct));
    438
    439	/* update local copies */
    440	ctb->tail = tail;
    441	GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
    442	atomic_sub(len + GUC_CTB_HDR_LEN, &ctb->space);
    443
    444	/* now update descriptor */
    445	WRITE_ONCE(desc->tail, tail);
    446
    447	return 0;
    448
    449corrupted:
    450	CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
    451		 desc->head, desc->tail, desc->status);
    452	ctb->broken = true;
    453	return -EPIPE;
    454}
    455
    456/**
    457 * wait_for_ct_request_update - Wait for CT request state update.
    458 * @req:	pointer to pending request
    459 * @status:	placeholder for status
    460 *
    461 * For each sent request, GuC shall send back a CT response message.
    462 * Our message handler will update the status of the tracked request once
    463 * a response message with the given fence is received. Wait here and
    464 * check for a valid response status value.
    465 *
    466 * Return:
    467 * *	0 response received (status is valid)
    468 * *	-ETIMEDOUT no response within hardcoded timeout
    469 */
    470static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
    471{
    472	int err;
    473
    474	/*
    475	 * Fast commands should complete in less than 10us, so sample quickly
    476	 * up to that length of time, then switch to a slower sleep-wait loop.
    477	 * No GuC command should ever take longer than 10ms but many GuC
    478	 * commands can be in flight at a time, so use a 1s timeout on the slower
    479	 * sleep-wait loop.
    480	 */
    481#define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
    482#define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
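       	/*
       	 * Note: completion is signalled purely through req->status. ct_send()
       	 * seeds it with 0, which does not decode as GUC_HXG_ORIGIN_GUC, and
       	 * ct_handle_response() later overwrites it with the received HXG
       	 * header dword, whose ORIGIN field is GUC_HXG_ORIGIN_GUC, so polling
       	 * that field below doubles as the "response arrived" flag.
       	 */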
    483#define done \
    484	(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
    485	 GUC_HXG_ORIGIN_GUC)
    486	err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
    487	if (err)
    488		err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
    489#undef done
    490
    491	*status = req->status;
    492	return err;
    493}
    494
    495#define GUC_CTB_TIMEOUT_MS	1500
    496static inline bool ct_deadlocked(struct intel_guc_ct *ct)
    497{
    498	long timeout = GUC_CTB_TIMEOUT_MS;
    499	bool ret = ktime_ms_delta(ktime_get(), ct->stall_time) > timeout;
    500
    501	if (unlikely(ret)) {
    502		struct guc_ct_buffer_desc *send = ct->ctbs.send.desc;
    503		struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc;
    504
    505		CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
    506			 ktime_ms_delta(ktime_get(), ct->stall_time),
    507			 send->status, recv->status);
    508		CT_ERROR(ct, "H2G Space: %u (Bytes)\n",
    509			 atomic_read(&ct->ctbs.send.space) * 4);
    510		CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.send.desc->head);
    511		CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.send.desc->tail);
    512		CT_ERROR(ct, "G2H Space: %u (Bytes)\n",
    513			 atomic_read(&ct->ctbs.recv.space) * 4);
    514		CT_ERROR(ct, "Head: %u (Dwords)\n", ct->ctbs.recv.desc->head);
    515		CT_ERROR(ct, "Tail: %u (Dwords)\n", ct->ctbs.recv.desc->tail);
    516
    517		ct->ctbs.send.broken = true;
    518	}
    519
    520	return ret;
    521}
    522
    523static inline bool g2h_has_room(struct intel_guc_ct *ct, u32 g2h_len_dw)
    524{
    525	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
    526
    527	/*
    528	 * We leave a certain amount of space in the G2H CTB buffer for
    529	 * unexpected G2H CTBs (e.g. logging, engine hang, etc...)
    530	 */
    531	return !g2h_len_dw || atomic_read(&ctb->space) >= g2h_len_dw;
    532}
    533
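       /*
        * G2H credits taken by g2h_reserve_space() are handed back either by
        * ct_send() once its blocking request has completed, or by
        * ct_handle_event() when a *_DONE event that was accounted for at
        * submission time is received.
        */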
    534static inline void g2h_reserve_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
    535{
    536	lockdep_assert_held(&ct->ctbs.send.lock);
    537
    538	GEM_BUG_ON(!g2h_has_room(ct, g2h_len_dw));
    539
    540	if (g2h_len_dw)
    541		atomic_sub(g2h_len_dw, &ct->ctbs.recv.space);
    542}
    543
    544static inline void g2h_release_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
    545{
    546	atomic_add(g2h_len_dw, &ct->ctbs.recv.space);
    547}
    548
    549static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
    550{
    551	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
    552	struct guc_ct_buffer_desc *desc = ctb->desc;
    553	u32 head;
    554	u32 space;
    555
    556	if (atomic_read(&ctb->space) >= len_dw)
    557		return true;
    558
    559	head = READ_ONCE(desc->head);
    560	if (unlikely(head > ctb->size)) {
    561		CT_ERROR(ct, "Invalid head offset %u >= %u\n",
    562			 head, ctb->size);
    563		desc->status |= GUC_CTB_STATUS_OVERFLOW;
    564		ctb->broken = true;
    565		return false;
    566	}
    567
    568	space = CIRC_SPACE(ctb->tail, head, ctb->size);
    569	atomic_set(&ctb->space, space);
    570
    571	return space >= len_dw;
    572}
    573
    574static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
    575{
    576	bool h2g = h2g_has_room(ct, h2g_dw);
    577	bool g2h = g2h_has_room(ct, g2h_dw);
    578
    579	lockdep_assert_held(&ct->ctbs.send.lock);
    580
    581	if (unlikely(!h2g || !g2h)) {
    582		if (ct->stall_time == KTIME_MAX)
    583			ct->stall_time = ktime_get();
    584
    585		/* Be paranoid and kick G2H tasklet to free credits */
    586		if (!g2h)
    587			tasklet_hi_schedule(&ct->receive_tasklet);
    588
    589		if (unlikely(ct_deadlocked(ct)))
    590			return -EPIPE;
    591		else
    592			return -EBUSY;
    593	}
    594
    595	ct->stall_time = KTIME_MAX;
    596	return 0;
    597}
    598
    599#define G2H_LEN_DW(f) ({ \
    600	typeof(f) f_ = (f); \
    601	FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) ? \
    602	FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) + \
    603	GUC_CTB_HXG_MSG_MIN_LEN : 0; \
    604})
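       /*
        * For non-blocking sends the caller encodes the expected G2H response
        * length in dwords in @flags (INTEL_GUC_CT_SEND_G2H_DW_MASK); when
        * non-zero, G2H_LEN_DW() adds GUC_CTB_HXG_MSG_MIN_LEN for the message
        * headers so that the whole response can be reserved up front from
        * the G2H credit pool.
        */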
    605static int ct_send_nb(struct intel_guc_ct *ct,
    606		      const u32 *action,
    607		      u32 len,
    608		      u32 flags)
    609{
    610	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
    611	unsigned long spin_flags;
    612	u32 g2h_len_dw = G2H_LEN_DW(flags);
    613	u32 fence;
    614	int ret;
    615
    616	spin_lock_irqsave(&ctb->lock, spin_flags);
    617
    618	ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN, g2h_len_dw);
    619	if (unlikely(ret))
    620		goto out;
    621
    622	fence = ct_get_next_fence(ct);
    623	ret = ct_write(ct, action, len, fence, flags);
    624	if (unlikely(ret))
    625		goto out;
    626
    627	g2h_reserve_space(ct, g2h_len_dw);
    628	intel_guc_notify(ct_to_guc(ct));
    629
    630out:
    631	spin_unlock_irqrestore(&ctb->lock, spin_flags);
    632
    633	return ret;
    634}
    635
    636static int ct_send(struct intel_guc_ct *ct,
    637		   const u32 *action,
    638		   u32 len,
    639		   u32 *response_buf,
    640		   u32 response_buf_size,
    641		   u32 *status)
    642{
    643	struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
    644	struct ct_request request;
    645	unsigned long flags;
    646	unsigned int sleep_period_ms = 1;
    647	bool send_again;
    648	u32 fence;
    649	int err;
    650
    651	GEM_BUG_ON(!ct->enabled);
    652	GEM_BUG_ON(!len);
    653	GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
    654	GEM_BUG_ON(!response_buf && response_buf_size);
    655	might_sleep();
    656
    657resend:
    658	send_again = false;
    659
    660	/*
    661	 * We use a lazy spin wait loop here as we believe that if the CT
    662	 * buffers are sized correctly the flow control condition should be
    663	 * rare. We reserve the maximum size in the G2H credits as we don't know
    664	 * how big the response is going to be.
    665	 */
    666retry:
    667	spin_lock_irqsave(&ctb->lock, flags);
    668	if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN) ||
    669		     !g2h_has_room(ct, GUC_CTB_HXG_MSG_MAX_LEN))) {
    670		if (ct->stall_time == KTIME_MAX)
    671			ct->stall_time = ktime_get();
    672		spin_unlock_irqrestore(&ctb->lock, flags);
    673
    674		if (unlikely(ct_deadlocked(ct)))
    675			return -EPIPE;
    676
    677		if (msleep_interruptible(sleep_period_ms))
    678			return -EINTR;
    679		sleep_period_ms = sleep_period_ms << 1;
    680
    681		goto retry;
    682	}
    683
    684	ct->stall_time = KTIME_MAX;
    685
    686	fence = ct_get_next_fence(ct);
    687	request.fence = fence;
    688	request.status = 0;
    689	request.response_len = response_buf_size;
    690	request.response_buf = response_buf;
    691
    692	spin_lock(&ct->requests.lock);
    693	list_add_tail(&request.link, &ct->requests.pending);
    694	spin_unlock(&ct->requests.lock);
    695
    696	err = ct_write(ct, action, len, fence, 0);
    697	g2h_reserve_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
    698
    699	spin_unlock_irqrestore(&ctb->lock, flags);
    700
    701	if (unlikely(err))
    702		goto unlink;
    703
    704	intel_guc_notify(ct_to_guc(ct));
    705
    706	err = wait_for_ct_request_update(&request, status);
    707	g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
    708	if (unlikely(err)) {
    709		CT_ERROR(ct, "No response for request %#x (fence %u)\n",
    710			 action[0], request.fence);
    711		goto unlink;
    712	}
    713
    714	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
    715		CT_DEBUG(ct, "retrying request %#x (%u)\n", *action,
    716			 FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, *status));
    717		send_again = true;
    718		goto unlink;
    719	}
    720
    721	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
    722		err = -EIO;
    723		goto unlink;
    724	}
    725
    726	if (response_buf) {
    727		/* There shall be no data in the status */
    728		WARN_ON(FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, request.status));
    729		/* Return actual response len */
    730		err = request.response_len;
    731	} else {
    732		/* There shall be no response payload */
    733		WARN_ON(request.response_len);
    734		/* Return data decoded from the status dword */
    735		err = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, *status);
    736	}
    737
    738unlink:
    739	spin_lock_irqsave(&ct->requests.lock, flags);
    740	list_del(&request.link);
    741	spin_unlock_irqrestore(&ct->requests.lock, flags);
    742
    743	if (unlikely(send_again))
    744		goto resend;
    745
    746	return err;
    747}
    748
    749/*
    750 * Command Transport (CT) buffer based GuC send function.
    751 */
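       /*
        * Example usage sketch (MY_GUC_ACTION and its parameters are
        * hypothetical placeholders, for illustration only):
        *
        *	u32 action[] = { MY_GUC_ACTION, param0, param1 };
        *	int ret = intel_guc_ct_send(ct, action, ARRAY_SIZE(action), NULL, 0, 0);
        */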
    752int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
    753		      u32 *response_buf, u32 response_buf_size, u32 flags)
    754{
    755	u32 status = ~0; /* undefined */
    756	int ret;
    757
    758	if (unlikely(!ct->enabled)) {
    759		struct intel_guc *guc = ct_to_guc(ct);
    760		struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
    761
    762		WARN(!uc->reset_in_progress, "Unexpected send: action=%#x\n", *action);
    763		return -ENODEV;
    764	}
    765
    766	if (unlikely(ct->ctbs.send.broken))
    767		return -EPIPE;
    768
    769	if (flags & INTEL_GUC_CT_SEND_NB)
    770		return ct_send_nb(ct, action, len, flags);
    771
    772	ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
    773	if (unlikely(ret < 0)) {
    774		CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
    775			 action[0], ERR_PTR(ret), status);
    776	} else if (unlikely(ret)) {
    777		CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
    778			 action[0], ret, ret);
    779	}
    780
    781	return ret;
    782}
    783
    784static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
    785{
    786	struct ct_incoming_msg *msg;
    787
    788	msg = kmalloc(struct_size(msg, msg, num_dwords), GFP_ATOMIC);
    789	if (msg)
    790		msg->size = num_dwords;
    791	return msg;
    792}
    793
    794static void ct_free_msg(struct ct_incoming_msg *msg)
    795{
    796	kfree(msg);
    797}
    798
    799/*
    800 * Return: number of remaining dwords available to read (0 if empty)
    801 *         or a negative error code on failure
    802 */
    803static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
    804{
    805	struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
    806	struct guc_ct_buffer_desc *desc = ctb->desc;
    807	u32 head = ctb->head;
    808	u32 tail = READ_ONCE(desc->tail);
    809	u32 size = ctb->size;
    810	u32 *cmds = ctb->cmds;
    811	s32 available;
    812	unsigned int len;
    813	unsigned int i;
    814	u32 header;
    815
    816	if (unlikely(ctb->broken))
    817		return -EPIPE;
    818
    819	if (unlikely(desc->status))
    820		goto corrupted;
    821
    822	GEM_BUG_ON(head > size);
    823
    824#ifdef CONFIG_DRM_I915_DEBUG_GUC
    825	if (unlikely(head != READ_ONCE(desc->head))) {
    826		CT_ERROR(ct, "Head was modified %u != %u\n",
    827			 desc->head, head);
    828		desc->status |= GUC_CTB_STATUS_MISMATCH;
    829		goto corrupted;
    830	}
    831#endif
    832	if (unlikely(tail >= size)) {
    833		CT_ERROR(ct, "Invalid tail offset %u >= %u\n",
    834			 tail, size);
    835		desc->status |= GUC_CTB_STATUS_OVERFLOW;
    836		goto corrupted;
    837	}
    838
    839	/* tail == head condition indicates empty */
    840	available = tail - head;
    841	if (unlikely(available == 0)) {
    842		*msg = NULL;
    843		return 0;
    844	}
    845
    846	/* beware of buffer wrap case */
    847	if (unlikely(available < 0))
    848		available += size;
    849	CT_DEBUG(ct, "available %d (%u:%u:%u)\n", available, head, tail, size);
    850	GEM_BUG_ON(available < 0);
    851
    852	header = cmds[head];
    853	head = (head + 1) % size;
    854
    855	/* message len with header */
    856	len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, header) + GUC_CTB_MSG_MIN_LEN;
    857	if (unlikely(len > (u32)available)) {
    858		CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
    859			 4, &header,
    860			 4 * (head + available - 1 > size ?
    861			      size - head : available - 1), &cmds[head],
    862			 4 * (head + available - 1 > size ?
    863			      available - 1 - size + head : 0), &cmds[0]);
    864		desc->status |= GUC_CTB_STATUS_UNDERFLOW;
    865		goto corrupted;
    866	}
    867
    868	*msg = ct_alloc_msg(len);
    869	if (!*msg) {
    870		CT_ERROR(ct, "No memory for message %*ph %*ph %*ph\n",
    871			 4, &header,
    872			 4 * (head + available - 1 > size ?
    873			      size - head : available - 1), &cmds[head],
    874			 4 * (head + available - 1 > size ?
    875			      available - 1 - size + head : 0), &cmds[0]);
    876		return available;
    877	}
    878
    879	(*msg)->msg[0] = header;
    880
    881	for (i = 1; i < len; i++) {
    882		(*msg)->msg[i] = cmds[head];
    883		head = (head + 1) % size;
    884	}
    885	CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);
    886
    887	/* update local copies */
    888	ctb->head = head;
    889
    890	/* now update descriptor */
    891	WRITE_ONCE(desc->head, head);
    892
    893	return available - len;
    894
    895corrupted:
    896	CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
    897		 desc->head, desc->tail, desc->status);
    898	ctb->broken = true;
    899	return -EPIPE;
    900}
    901
    902static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
    903{
    904	u32 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, response->msg[0]);
    905	u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, response->msg[0]);
    906	const u32 *hxg = &response->msg[GUC_CTB_MSG_MIN_LEN];
    907	const u32 *data = &hxg[GUC_HXG_MSG_MIN_LEN];
    908	u32 datalen = len - GUC_HXG_MSG_MIN_LEN;
    909	struct ct_request *req;
    910	unsigned long flags;
    911	bool found = false;
    912	int err = 0;
    913
    914	GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
    915	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
    916	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
    917		   FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_NO_RESPONSE_RETRY &&
    918		   FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);
    919
    920	CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);
    921
    922	spin_lock_irqsave(&ct->requests.lock, flags);
    923	list_for_each_entry(req, &ct->requests.pending, link) {
    924		if (unlikely(fence != req->fence)) {
    925			CT_DEBUG(ct, "request %u awaits response\n",
    926				 req->fence);
    927			continue;
    928		}
    929		if (unlikely(datalen > req->response_len)) {
    930			CT_ERROR(ct, "Response %u too long (datalen %u > %u)\n",
    931				 req->fence, datalen, req->response_len);
    932			datalen = min(datalen, req->response_len);
    933			err = -EMSGSIZE;
    934		}
    935		if (datalen)
    936			memcpy(req->response_buf, data, 4 * datalen);
    937		req->response_len = datalen;
    938		WRITE_ONCE(req->status, hxg[0]);
    939		found = true;
    940		break;
    941	}
    942	if (!found) {
    943		CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
    944		CT_ERROR(ct, "Could not find fence=%u, last_fence=%u\n", fence,
    945			 ct->requests.last_fence);
    946		list_for_each_entry(req, &ct->requests.pending, link)
    947			CT_ERROR(ct, "request %u awaits response\n",
    948				 req->fence);
    949		err = -ENOKEY;
    950	}
    951	spin_unlock_irqrestore(&ct->requests.lock, flags);
    952
    953	if (unlikely(err))
    954		return err;
    955
    956	ct_free_msg(response);
    957	return 0;
    958}
    959
    960static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
    961{
    962	struct intel_guc *guc = ct_to_guc(ct);
    963	const u32 *hxg;
    964	const u32 *payload;
    965	u32 hxg_len, action, len;
    966	int ret;
    967
    968	hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
    969	hxg_len = request->size - GUC_CTB_MSG_MIN_LEN;
    970	payload = &hxg[GUC_HXG_MSG_MIN_LEN];
    971	action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
    972	len = hxg_len - GUC_HXG_MSG_MIN_LEN;
    973
    974	CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
    975
    976	switch (action) {
    977	case INTEL_GUC_ACTION_DEFAULT:
    978		ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
    979		break;
    980	case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
    981		ret = intel_guc_deregister_done_process_msg(guc, payload,
    982							    len);
    983		break;
    984	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
    985		ret = intel_guc_sched_done_process_msg(guc, payload, len);
    986		break;
    987	case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
    988		ret = intel_guc_context_reset_process_msg(guc, payload, len);
    989		break;
    990	case INTEL_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
    991		ret = intel_guc_error_capture_process_msg(guc, payload, len);
    992		if (unlikely(ret))
    993			CT_ERROR(ct, "error capture notification failed %x %*ph\n",
    994				 action, 4 * len, payload);
    995		break;
    996	case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
    997		ret = intel_guc_engine_failure_process_msg(guc, payload, len);
    998		break;
    999	case INTEL_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
   1000		intel_guc_log_handle_flush_event(&guc->log);
   1001		ret = 0;
   1002		break;
   1003	case INTEL_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
   1004		CT_ERROR(ct, "Received GuC crash dump notification!\n");
   1005		ret = 0;
   1006		break;
   1007	case INTEL_GUC_ACTION_NOTIFY_EXCEPTION:
   1008		CT_ERROR(ct, "Received GuC exception notification!\n");
   1009		ret = 0;
   1010		break;
   1011	default:
   1012		ret = -EOPNOTSUPP;
   1013		break;
   1014	}
   1015
   1016	if (unlikely(ret)) {
   1017		CT_ERROR(ct, "Failed to process request %04x (%pe)\n",
   1018			 action, ERR_PTR(ret));
   1019		return ret;
   1020	}
   1021
   1022	ct_free_msg(request);
   1023	return 0;
   1024}
   1025
   1026static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
   1027{
   1028	unsigned long flags;
   1029	struct ct_incoming_msg *request;
   1030	bool done;
   1031	int err;
   1032
   1033	spin_lock_irqsave(&ct->requests.lock, flags);
   1034	request = list_first_entry_or_null(&ct->requests.incoming,
   1035					   struct ct_incoming_msg, link);
   1036	if (request)
   1037		list_del(&request->link);
   1038	done = !!list_empty(&ct->requests.incoming);
   1039	spin_unlock_irqrestore(&ct->requests.lock, flags);
   1040
   1041	if (!request)
   1042		return true;
   1043
   1044	err = ct_process_request(ct, request);
   1045	if (unlikely(err)) {
   1046		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
   1047			 ERR_PTR(err), 4 * request->size, request->msg);
   1048		ct_free_msg(request);
   1049	}
   1050
   1051	return done;
   1052}
   1053
   1054static void ct_incoming_request_worker_func(struct work_struct *w)
   1055{
   1056	struct intel_guc_ct *ct =
   1057		container_of(w, struct intel_guc_ct, requests.worker);
   1058	bool done;
   1059
   1060	do {
   1061		done = ct_process_incoming_requests(ct);
   1062	} while (!done);
   1063}
   1064
   1065static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
   1066{
   1067	const u32 *hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
   1068	u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
   1069	unsigned long flags;
   1070
   1071	GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT);
   1072
   1073	/*
   1074	 * Adjusting the space must be done in IRQ context or a deadlock can
   1075	 * occur, as the CTB processing in the workqueue below can itself send
   1076	 * CTBs, creating a circular dependency if the space were returned there.
   1077	 */
   1078	switch (action) {
   1079	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
   1080	case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
   1081		g2h_release_space(ct, request->size);
   1082	}
   1083
   1084	spin_lock_irqsave(&ct->requests.lock, flags);
   1085	list_add_tail(&request->link, &ct->requests.incoming);
   1086	spin_unlock_irqrestore(&ct->requests.lock, flags);
   1087
   1088	queue_work(system_unbound_wq, &ct->requests.worker);
   1089	return 0;
   1090}
   1091
   1092static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
   1093{
   1094	u32 origin, type;
   1095	u32 *hxg;
   1096	int err;
   1097
   1098	if (unlikely(msg->size < GUC_CTB_HXG_MSG_MIN_LEN))
   1099		return -EBADMSG;
   1100
   1101	hxg = &msg->msg[GUC_CTB_MSG_MIN_LEN];
   1102
   1103	origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
   1104	if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
   1105		err = -EPROTO;
   1106		goto failed;
   1107	}
   1108
   1109	type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
   1110	switch (type) {
   1111	case GUC_HXG_TYPE_EVENT:
   1112		err = ct_handle_event(ct, msg);
   1113		break;
   1114	case GUC_HXG_TYPE_RESPONSE_SUCCESS:
   1115	case GUC_HXG_TYPE_RESPONSE_FAILURE:
   1116	case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
   1117		err = ct_handle_response(ct, msg);
   1118		break;
   1119	default:
   1120		err = -EOPNOTSUPP;
   1121	}
   1122
   1123	if (unlikely(err)) {
   1124failed:
   1125		CT_ERROR(ct, "Failed to handle HXG message (%pe) %*ph\n",
   1126			 ERR_PTR(err), 4 * GUC_HXG_MSG_MIN_LEN, hxg);
   1127	}
   1128	return err;
   1129}
   1130
   1131static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
   1132{
   1133	u32 format = FIELD_GET(GUC_CTB_MSG_0_FORMAT, msg->msg[0]);
   1134	int err;
   1135
   1136	if (format == GUC_CTB_FORMAT_HXG)
   1137		err = ct_handle_hxg(ct, msg);
   1138	else
   1139		err = -EOPNOTSUPP;
   1140
   1141	if (unlikely(err)) {
   1142		CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
   1143			 ERR_PTR(err), 4 * msg->size, msg->msg);
   1144		ct_free_msg(msg);
   1145	}
   1146}
   1147
   1148/*
   1149 * Return: number of remaining dwords available to read (0 if empty)
   1150 *         or a negative error code on failure
   1151 */
   1152static int ct_receive(struct intel_guc_ct *ct)
   1153{
   1154	struct ct_incoming_msg *msg = NULL;
   1155	unsigned long flags;
   1156	int ret;
   1157
   1158	spin_lock_irqsave(&ct->ctbs.recv.lock, flags);
   1159	ret = ct_read(ct, &msg);
   1160	spin_unlock_irqrestore(&ct->ctbs.recv.lock, flags);
   1161	if (ret < 0)
   1162		return ret;
   1163
   1164	if (msg)
   1165		ct_handle_msg(ct, msg);
   1166
   1167	return ret;
   1168}
   1169
   1170static void ct_try_receive_message(struct intel_guc_ct *ct)
   1171{
   1172	int ret;
   1173
   1174	if (GEM_WARN_ON(!ct->enabled))
   1175		return;
   1176
   1177	ret = ct_receive(ct);
   1178	if (ret > 0)
   1179		tasklet_hi_schedule(&ct->receive_tasklet);
   1180}
   1181
   1182static void ct_receive_tasklet_func(struct tasklet_struct *t)
   1183{
   1184	struct intel_guc_ct *ct = from_tasklet(ct, t, receive_tasklet);
   1185
   1186	ct_try_receive_message(ct);
   1187}
   1188
   1189/*
   1190 * When we're communicating with the GuC over CT, GuC uses events
   1191 * to notify us about new messages being posted on the RECV buffer.
   1192 */
   1193void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
   1194{
   1195	if (unlikely(!ct->enabled)) {
   1196		WARN(1, "Unexpected GuC event received while CT disabled!\n");
   1197		return;
   1198	}
   1199
   1200	ct_try_receive_message(ct);
   1201}
   1202
   1203void intel_guc_ct_print_info(struct intel_guc_ct *ct,
   1204			     struct drm_printer *p)
   1205{
   1206	drm_printf(p, "CT %s\n", str_enabled_disabled(ct->enabled));
   1207
   1208	if (!ct->enabled)
   1209		return;
   1210
   1211	drm_printf(p, "H2G Space: %u\n",
   1212		   atomic_read(&ct->ctbs.send.space) * 4);
   1213	drm_printf(p, "Head: %u\n",
   1214		   ct->ctbs.send.desc->head);
   1215	drm_printf(p, "Tail: %u\n",
   1216		   ct->ctbs.send.desc->tail);
   1217	drm_printf(p, "G2H Space: %u\n",
   1218		   atomic_read(&ct->ctbs.recv.space) * 4);
   1219	drm_printf(p, "Head: %u\n",
   1220		   ct->ctbs.recv.desc->head);
   1221	drm_printf(p, "Tail: %u\n",
   1222		   ct->ctbs.recv.desc->tail);
   1223}