cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ice_controlq.c (34458B)


// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

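/* ICE_CQ_INIT_REGS - fill in the register offsets and masks for a control
 * queue from the per-queue register prefix (PF_FW for the admin queue,
 * PF_MBX for the PF-VF mailbox, PF_SB for the sideband queue).
 */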
#define ICE_CQ_INIT_REGS(qinfo, prefix)				\
do {								\
	(qinfo)->sq.head = prefix##_ATQH;			\
	(qinfo)->sq.tail = prefix##_ATQT;			\
	(qinfo)->sq.len = prefix##_ATQLEN;			\
	(qinfo)->sq.bah = prefix##_ATQBAH;			\
	(qinfo)->sq.bal = prefix##_ATQBAL;			\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;	\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;	\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;		\
	(qinfo)->rq.head = prefix##_ARQH;			\
	(qinfo)->rq.tail = prefix##_ARQT;			\
	(qinfo)->rq.len = prefix##_ARQLEN;			\
	(qinfo)->rq.bah = prefix##_ARQBAH;			\
	(qinfo)->rq.bal = prefix##_ARQBAL;			\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;	\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;	\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;		\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_sb_init_regs - Initialize Sideband registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_sb_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->sbq;

	ICE_CQ_INIT_REGS(cq, PF_SB);
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return -ENOMEM;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return -ENOMEM;
	cq->rq.desc_buf.size = size;
	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return -ENOMEM;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return -ENOMEM;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return -ENOMEM;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return -ENOMEM;
}

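/**
 * ice_cfg_cq_regs - setup registers for a control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 * @num_entries: number of descriptors in the ring
 *
 * Clears the head and tail, programs the length (with the enable bit set)
 * and the base address registers, then reads one register back to verify
 * that the configuration was applied.
 */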
static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return -EIO;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}

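/* ICE_FREE_CQ_BUFS - free the posted DMA buffers, the command buffer list
 * and the DMA head allocated for the given ring (sq or rq) of a control
 * queue.
 */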
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa) {		\
				dmam_free_coherent(ice_hw_to_dev(hw),	\
					(qi)->ring.r.ring##_bi[i].size,	\
					(qi)->ring.r.ring##_bi[i].va,	\
					(qi)->ring.r.ring##_bi[i].pa);	\
					(qi)->ring.r.ring##_bi[i].va = NULL;\
					(qi)->ring.r.ring##_bi[i].pa = 0;\
					(qi)->ring.r.ring##_bi[i].size = 0;\
		}							\
	}								\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = -EBUSY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = -EIO;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = -EBUSY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = -EBUSY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static int ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	int status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = -EIO;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	int ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_SB:
		ice_sb_init_regs(hw);
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return -EINVAL;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return -EIO;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_is_sbq_supported - is the sideband queue supported
 * @hw: pointer to the hardware structure
 *
 * Returns true if the sideband control queue interface is
 * supported for the device, false otherwise
 */
bool ice_is_sbq_supported(struct ice_hw *hw)
{
	/* The device sideband queue is only supported on devices with the
	 * generic MAC type.
	 */
	return hw->mac_type == ICE_MAC_GENERIC;
}

/**
 * ice_get_sbq - returns the right control queue to use for sideband
 * @hw: pointer to the hardware structure
 */
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
{
	if (ice_is_sbq_supported(hw))
		return &hw->sbq;
	return &hw->adminq;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_SB:
		cq = &hw->sbq;
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PHY Sideband */
	if (ice_is_sbq_supported(hw))
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
int ice_init_all_ctrlq(struct ice_hw *hw)
{
	u32 retry = 0;
	int status;

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != -EIO)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;
	/* sideband control queue (SBQ) interface is not supported on some
	 * devices. Initialize if supported, else fallback to the admin queue
	 * interface
	 */
	if (ice_is_sbq_supported(hw)) {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
		if (status)
			return status;
	}
	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
int ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_init_ctrlq_locks(&hw->sbq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	if (ice_is_sbq_supported(hw))
		ice_destroy_ctrlq_locks(&hw->sbq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = desc;
	u16 len;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	int status = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return -EBUSY;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
		status = -EIO;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = -EINVAL;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = -EINVAL;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = -EIO;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
		status = -ENOSPC;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);

	do {
		if (ice_sq_done(hw, cq))
			break;

		udelay(ICE_CTL_Q_SQ_CMD_USEC);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = -EIO;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = -EIO;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = -EIO;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
			status = -EIO;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}

/**
 * ice_clean_rq_elem
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_aq_err rq_last_status;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	int ret_code = 0;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
		ret_code = -EIO;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = -EALREADY;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = -EIO;
		ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode), rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}
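
/* Illustrative sketch, not part of the driver: a typical caller builds a
 * descriptor with ice_fill_dflt_direct_cmd_desc() and submits it with
 * ice_sq_send_cmd(). The helper below is hypothetical and only shows the
 * calling convention for a direct (bufferless) admin queue command; it is
 * kept under #if 0 so it is never compiled.
 */
#if 0
static int ice_example_send_direct_cmd(struct ice_hw *hw, u16 opcode)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, opcode);

	/* NULL buffer and zero length denote a direct command; a NULL
	 * ice_sq_cd pointer requests no extra command details.
	 */
	return ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
}
#endif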