cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

iavf_adminq.c


// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void iavf_adminq_init_regs(struct iavf_hw *hw)
{
	/* set head and tail registers in our local struct */
	hw->aq.asq.tail = IAVF_VF_ATQT1;
	hw->aq.asq.head = IAVF_VF_ATQH1;
	hw->aq.asq.len  = IAVF_VF_ATQLEN1;
	hw->aq.asq.bal  = IAVF_VF_ATQBAL1;
	hw->aq.asq.bah  = IAVF_VF_ATQBAH1;
	hw->aq.arq.tail = IAVF_VF_ARQT1;
	hw->aq.arq.head = IAVF_VF_ARQH1;
	hw->aq.arq.len  = IAVF_VF_ARQLEN1;
	hw->aq.arq.bal  = IAVF_VF_ARQBAL1;
	hw->aq.arq.bah  = IAVF_VF_ARQBAH1;
}

/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 iavf_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct iavf_asq_cmd_details)));
	if (ret_code) {
		iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 iavf_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct iavf_aq_desc)),
					 IAVF_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void iavf_free_adminq_asq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void iavf_free_adminq_arq(struct iavf_hw *hw)
{
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
	struct iavf_aq_desc *desc;
	struct iavf_dma_mem *bi;
	enum iavf_status ret_code;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
					  (hw->aq.num_arq_entries *
					   sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
		if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
			desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.external.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
	struct iavf_dma_mem *bi;
	enum iavf_status ret_code;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
					  (hw->aq.num_asq_entries *
					   sizeof(struct iavf_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = iavf_allocate_dma_mem(hw, bi,
						 iavf_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 IAVF_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void iavf_free_arq_bufs(struct iavf_hw *hw)
{
	int i;

	/* free descriptors */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void iavf_free_asq_bufs(struct iavf_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this, as the memory allocation
 *  routines called are not atomic-context safe.
 **/
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_asq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_asq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_asq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this, as the memory allocation
 *  routines called are not atomic-context safe.
 **/
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = IAVF_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = iavf_alloc_adminq_arq_ring(hw);
	if (ret_code)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = iavf_alloc_arq_bufs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = iavf_config_arq_regs(hw);
	if (ret_code)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	iavf_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_asq_bufs(hw);

shutdown_asq_out:
	mutex_unlock(&hw->aq.asq_mutex);
	return ret_code;
}

/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
	enum iavf_status ret_code = 0;

	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		ret_code = IAVF_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	iavf_free_arq_bufs(hw);

shutdown_arq_out:
	mutex_unlock(&hw->aq.arq_mutex);
	return ret_code;
}

/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
	enum iavf_status ret_code;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = IAVF_ERR_CONFIG;
		goto init_adminq_exit;
	}

	/* Set up register offsets */
	iavf_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = iavf_init_asq(hw);
	if (ret_code)
		goto init_adminq_destroy_locks;

	/* allocate the ARQ */
	ret_code = iavf_init_arq(hw);
	if (ret_code)
		goto init_adminq_free_asq;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_asq:
	iavf_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
	return ret_code;
}
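
/* A minimal bring-up sketch, assuming the caller has already run
 * mutex_init() on hw->aq.asq_mutex and hw->aq.arq_mutex.  The function
 * name and sizing values are illustrative assumptions, not mandated by
 * this file; __maybe_unused keeps the sketch from warning when it has
 * no caller.
 */
static enum iavf_status __maybe_unused
example_adminq_bringup(struct iavf_hw *hw)
{
	/* sizing fields consumed by iavf_init_asq()/iavf_init_arq() */
	hw->aq.num_asq_entries = 32;
	hw->aq.num_arq_entries = 32;
	hw->aq.asq_buf_size = 4096;
	hw->aq.arq_buf_size = 4096;

	return iavf_init_adminq(hw);
}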

/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
	if (iavf_check_asq_alive(hw))
		iavf_aq_queue_shutdown(hw, true);

	iavf_shutdown_asq(hw);
	iavf_shutdown_arq(hw);

	return 0;
}

/**
 *  iavf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
static u16 iavf_clean_asq(struct iavf_hw *hw)
{
	struct iavf_adminq_ring *asq = &hw->aq.asq;
	struct iavf_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct iavf_aq_desc desc_cb;
	struct iavf_aq_desc *desc;

	desc = IAVF_ADMINQ_DESC(*asq, ntc);
	details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			IAVF_ADMINQ_CALLBACK cb_func =
					(IAVF_ADMINQ_CALLBACK)details->callback;
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct iavf_asq_cmd_details));
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = IAVF_ADMINQ_DESC(*asq, ntc);
		details = IAVF_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	return IAVF_DESC_UNUSED(asq);
}
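
/* Worked example of the free-count math, assuming the usual ring macro
 * from iavf_adminq.h: with asq->count = 32, next_to_clean = 2 and
 * next_to_use = 5, IAVF_DESC_UNUSED() yields 32 + 2 - 5 - 1 = 28.  The
 * minus one keeps a single slot permanently unused so that
 * head == tail can only mean "empty", never "full".
 */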

/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
				       struct iavf_aq_desc *desc,
				       void *buff, /* can be NULL */
				       u16  buff_size,
				       struct iavf_asq_cmd_details *cmd_details)
{
	struct iavf_dma_mem *dma_buff = NULL;
	struct iavf_asq_cmd_details *details;
	struct iavf_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum iavf_status status = 0;
	u16  retval = 0;
	u32  val = 0;

	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = IAVF_AQ_RC_OK;

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = IAVF_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct iavf_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = IAVF_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = IAVF_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (iavf_clean_asq(hw) == 0) {
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = IAVF_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff) {
		dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (iavf_asq_done(hw))
				break;
			udelay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (iavf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			iavf_debug(hw,
				   IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
			status = 0;
		else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
			status = IAVF_ERR_NOT_READY;
		else
			status = IAVF_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
	}

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}

/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
}
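
/* Sketch of a direct (buffer-less) command built with the helper above.
 * The function itself is illustrative only: "opcode" stands in for one
 * of the iavf_aqc_opc_* values and error handling is trimmed.
 */
static enum iavf_status __maybe_unused
example_send_direct_cmd(struct iavf_hw *hw, u16 opcode)
{
	struct iavf_aq_desc desc;

	iavf_fill_default_direct_cmd_desc(&desc, opcode);

	/* a NULL buffer and zero length mark the command as direct */
	return iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
}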

/**
 *  iavf_clean_arq_element - clean one element from the receive queue
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
					struct iavf_arq_event_info *e,
					u16 *pending)
{
	u16 ntc = hw->aq.arq.next_to_clean;
	struct iavf_aq_desc *desc;
	enum iavf_status ret_code = 0;
	struct iavf_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&hw->aq.arq_mutex);

	if (hw->aq.arq.count == 0) {
		iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = IAVF_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & IAVF_AQ_FLAG_ERR) {
		ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
		iavf_debug(hw,
			   IAVF_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	e->desc = *desc;
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf && (e->msg_len != 0))
		memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
		       e->msg_len);

	iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	memset((void *)desc, 0, sizeof(struct iavf_aq_desc));

	desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
		desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16((u16)bi->size);
	desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
	/* Set pending if needed, unlock and return */
	if (pending)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
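	/* e.g. with count = 32, ntu = 3 and the already-incremented
	 * ntc = 30, pending = 32 + (3 - 30) = 5 events left in the ring
	 */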

clean_arq_element_err:
	mutex_unlock(&hw->aq.arq_mutex);

	return ret_code;
}
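
/* Illustrative consumer loop showing how the cleaner above is typically
 * driven: drain events until no work remains.  The name and parameters
 * are assumptions for the sketch; buf must be at least
 * hw->aq.arq_buf_size bytes.
 */
static void __maybe_unused example_drain_arq(struct iavf_hw *hw,
					     u8 *buf, u16 buf_len)
{
	struct iavf_arq_event_info event;
	u16 pending;

	event.buf_len = buf_len;
	event.msg_buf = buf;

	do {
		if (iavf_clean_arq_element(hw, &event, &pending))
			break;	/* no work left or queue not initialized */
		/* event.desc and event.msg_buf now hold one event message */
	} while (pending);
}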