cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

bnxt_hwrm.c (26536B)


/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2020 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"

static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type)
{
	return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL;
}
/**
 * __hwrm_req_init() - Initialize an HWRM request.
 * @bp: The driver context.
 * @req: A pointer to the request pointer to initialize.
 * @req_type: The request type. This will be converted to little endian
 *	before being written to the req_type field of the returned request.
 * @req_len: The length of the request to be allocated.
 *
 * Allocate DMA resources and initialize a new HWRM request object of the
 * given type. The response address field in the request is configured with
 * the DMA bus address that has been mapped for the response and the passed
 * request is pointed to kernel virtual memory mapped for the request (such
 * that short_input indirection can be accomplished without copying). The
 * request's target and completion ring are initialized to default values and
 * can be overridden by writing to the returned request object directly.
 *
 * The initialized request can be further customized by writing to its fields
 * directly, taking care to convert such fields to little endian. The request
 * object will be consumed (and all its associated resources released) upon
 * passing it to hwrm_req_send() unless ownership of the request has been
 * claimed by the caller via a call to hwrm_req_hold(). If the request is not
 * consumed, either because it is never sent or because ownership has been
 * claimed, then it must be released by a call to hwrm_req_drop().
 *
 * Return: zero on success, negative error code otherwise:
 *	E2BIG: the requested length is too large to fit in the DMA buffer.
 *	ENOMEM: an allocation failure occurred.
 */
int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len)
{
	struct bnxt_hwrm_ctx *ctx;
	dma_addr_t dma_handle;
	u8 *req_addr;

	if (req_len > BNXT_HWRM_CTX_OFFSET)
		return -E2BIG;

	req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO,
				  &dma_handle);
	if (!req_addr)
		return -ENOMEM;

	ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET);
	/* safety first, sentinel used to check for invalid requests */
	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);
	ctx->req_len = req_len;
	ctx->req = (struct input *)req_addr;
	ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET);
	ctx->dma_handle = dma_handle;
	ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */
	ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT;
	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
	ctx->gfp = GFP_KERNEL;
	ctx->slice_addr = NULL;

	/* initialize common request fields */
	ctx->req->req_type = cpu_to_le16(req_type);
	ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET);
	ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING);
	ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET);
	*req = ctx->req;

	return 0;
}
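
/* Usage sketch (illustrative, not built): the basic one-shot pattern
 * enabled by __hwrm_req_init(), via the hwrm_req_init() wrapper macro
 * from bnxt_hwrm.h which infers the request length and type from the
 * request pointer. HWRM_FUNC_RESET is used purely for illustration; any
 * message type follows the same shape.
 */
#if 0
static int example_one_shot(struct bnxt *bp)
{
	struct hwrm_func_reset_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
	if (rc)
		return rc;

	/* customize fields in place, converting to little endian */
	req->enables = 0;

	/* consumes req and its DMA resources, since it is not held */
	return hwrm_req_send(bp, req);
}
#endif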

static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr)
{
	void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET;
	struct input *req = (struct input *)req_addr;
	struct bnxt_hwrm_ctx *ctx = ctx_addr;
	u64 sentinel;

	if (!req) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "null HWRM request");
		dump_stack();
		return NULL;
	}

	/* HWRM API has no type safety, verify sentinel to validate address */
	sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type));
	if (ctx->sentinel != sentinel) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n",
			   (u32)le16_to_cpu(req->req_type));
		dump_stack();
		return NULL;
	}

	return ctx;
}

/**
 * hwrm_req_timeout() - Set the completion timeout for the request.
 * @bp: The driver context.
 * @req: The request for which to set the timeout.
 * @timeout: The timeout in milliseconds.
 *
 * Set the timeout associated with the request for subsequent calls to
 * hwrm_req_send(). Some requests are long running and require a different
 * timeout than the default.
 */
void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->timeout = timeout;
}

/**
 * hwrm_req_alloc_flags() - Sets GFP allocation flags for slices.
 * @bp: The driver context.
 * @req: The request for which calls to hwrm_req_dma_slice() will have altered
 *	allocation flags.
 * @gfp: A bitmask of GFP flags. These flags are passed to dma_alloc_coherent()
 *	whenever it is used to allocate backing memory for slices. Note that
 *	calls to hwrm_req_dma_slice() will not always result in new allocations;
 *	however, memory suballocated from the request buffer is already
 *	__GFP_ZERO.
 *
 * Sets the GFP allocation flags associated with the request for subsequent
 * calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO
 * for slice allocations.
 */
void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->gfp = gfp;
}

/**
 * hwrm_req_replace() - Replace request data.
 * @bp: The driver context.
 * @req: The request to modify. A call to hwrm_req_replace() is conceptually
 *	an assignment of new_req to req. Subsequent calls to HWRM API functions,
 *	such as hwrm_req_send(), should thus use req and not new_req (in fact,
 *	calls to HWRM API functions will fail if non-managed request objects
 *	are passed).
 * @new_req: The pre-built request to copy or reference.
 * @len: The length of new_req.
 *
 * Replaces the request data in req with that of new_req. This is useful in
 * scenarios where a request object has already been constructed by a third
 * party prior to creating a resource managed request using hwrm_req_init().
 * Depending on the length, hwrm_req_replace() will either copy the new
 * request data into the DMA memory allocated for req, or it will simply
 * reference the new request and use it in lieu of req during subsequent
 * calls to hwrm_req_send(). The resource management is associated with
 * req and is independent of and does not apply to new_req. The caller must
 * ensure that the lifetime of new_req is at least as long as that of req.
 * Any slices that may have been associated with the original request are
 * released.
 *
 * Return: zero on success, negative error code otherwise:
 *	E2BIG: Request is too large.
 *	EINVAL: Invalid request to modify.
 */
int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	struct input *internal_req = req;
	u16 req_type;

	if (!ctx)
		return -EINVAL;

	if (len > BNXT_HWRM_CTX_OFFSET)
		return -E2BIG;

	/* free any existing slices */
	ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET;
	if (ctx->slice_addr) {
		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
				  ctx->slice_addr, ctx->slice_handle);
		ctx->slice_addr = NULL;
	}
	ctx->gfp = GFP_KERNEL;

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) {
		memcpy(internal_req, new_req, len);
	} else {
		internal_req->req_type = ((struct input *)new_req)->req_type;
		ctx->req = new_req;
	}

	ctx->req_len = len;
	ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle +
					  BNXT_HWRM_RESP_OFFSET);

	/* update sentinel for potentially new request type */
	req_type = le16_to_cpu(internal_req->req_type);
	ctx->sentinel = hwrm_calc_sentinel(ctx, req_type);

	return 0;
}
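
/* Usage sketch (illustrative, not built): forwarding a pre-built message
 * with hwrm_req_replace(), in the style of an indirect caller such as
 * bnxt_send_msg(). The fw_msg buffer and its length are assumed to come
 * from the third party that constructed the request; the request type
 * passed to hwrm_req_init() is a don't-care since replace overwrites it.
 */
#if 0
static int example_forward_prebuilt(struct bnxt *bp, void *fw_msg, u32 len)
{
	struct input *req;
	int rc;

	rc = hwrm_req_init(bp, req, 0 /* don't care, replaced below */);
	if (rc)
		return rc;

	rc = hwrm_req_replace(bp, req, fw_msg, len);
	if (rc) {
		hwrm_req_drop(bp, req);	/* never sent, release manually */
		return rc;
	}

	return hwrm_req_send(bp, req);	/* resource management stays with req */
}
#endif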

/**
 * hwrm_req_flags() - Set non-internal flags of the ctx.
 * @bp: The driver context.
 * @req: The request containing the HWRM command.
 * @flags: ctx flags that don't have BNXT_HWRM_INTERNAL_FLAG set.
 *
 * ctx flags can be used by callers to instruct how the subsequent
 * hwrm_req_send() should behave. For example, callers can use hwrm_req_flags()
 * with BNXT_HWRM_CTX_SILENT to omit kernel prints of errors from
 * hwrm_req_send(), or with BNXT_HWRM_FULL_WAIT to force hwrm_req_send() to
 * wait for the full timeout even if the FW is not responding.
 * This generic function can be used to set any flag that is not an internal
 * flag of the HWRM module.
 */
void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		ctx->flags |= (flags & HWRM_API_FLAGS);
}

/**
 * hwrm_req_hold() - Claim ownership of the request's resources.
 * @bp: The driver context.
 * @req: A pointer to the request to own. The request will no longer be
 *	consumed by calls to hwrm_req_send().
 *
 * Take ownership of the request. Ownership places responsibility on the
 * caller to free the resources associated with the request via a call to
 * hwrm_req_drop(). The caller taking ownership implies that a subsequent
 * call to hwrm_req_send() will not consume the request (i.e. sending will
 * not free the associated resources if the request is owned by the caller).
 * Taking ownership returns a reference to the response. Retaining and
 * accessing the response data is the most common reason to take ownership
 * of the request. Ownership can also be acquired in order to reuse the same
 * request object across multiple invocations of hwrm_req_send().
 *
 * Return: A pointer to the response object.
 *
 * The resources associated with the response will remain available to the
 * caller until ownership of the request is relinquished via a call to
 * hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if
 * a valid request is provided. A returned NULL value would imply a driver
 * bug and the implementation will complain loudly in the logs to aid in
 * detection. It should not be necessary to check the result for NULL.
 */
void *hwrm_req_hold(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	struct input *input = (struct input *)req;

	if (!ctx)
		return NULL;

	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) {
		/* can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n",
			   (u32)le16_to_cpu(input->req_type));
		dump_stack();
		return NULL;
	}

	ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED;
	return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET;
}
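
/* Usage sketch (illustrative, not built): holding a request in order to
 * read its response after hwrm_req_send(). HWRM_VER_GET and its
 * input/output structures are real message definitions from bnxt_hsi.h;
 * the specific fields touched here are for illustration only.
 */
#if 0
static int example_query_version(struct bnxt *bp)
{
	struct hwrm_ver_get_output *resp;
	struct hwrm_ver_get_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_VER_GET);
	if (rc)
		return rc;

	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
	resp = hwrm_req_hold(bp, req);	/* response valid until drop */
	rc = hwrm_req_send(bp, req);	/* does not consume req, it is held */
	if (!rc)
		netdev_info(bp->dev, "HWRM interface %d.%d.%d\n",
			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
			    resp->hwrm_intf_upd_8b);
	hwrm_req_drop(bp, req);		/* releases req and resp */
	return rc;
}
#endif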

static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
	void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET;
	dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */

	/* unmap any auxiliary DMA slice */
	if (ctx->slice_addr)
		dma_free_coherent(&bp->pdev->dev, ctx->slice_size,
				  ctx->slice_addr, ctx->slice_handle);

	/* invalidate, ensure ownership, sentinel and dma_handle are cleared */
	memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx));

	/* return the buffer to the DMA pool */
	if (dma_handle)
		dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle);
}

/**
 * hwrm_req_drop() - Release all resources associated with the request.
 * @bp: The driver context.
 * @req: The request to consume, releasing the associated resources. The
 *	request object, any slices, and its associated response are no
 *	longer valid.
 *
 * It is legal to call hwrm_req_drop() on an unowned request, provided it
 * has not already been consumed by hwrm_req_send() (for example, to release
 * an aborted request). A given request should not be dropped more than once,
 * nor should it be dropped after having been consumed by hwrm_req_send(). To
 * do so is an error (the context will not be found and a stack trace will be
 * rendered in the kernel log).
 */
void hwrm_req_drop(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (ctx)
		__hwrm_ctx_drop(bp, ctx);
}

static int __hwrm_to_stderr(u32 hwrm_err)
{
	switch (hwrm_err) {
	case HWRM_ERR_CODE_SUCCESS:
		return 0;
	case HWRM_ERR_CODE_RESOURCE_LOCKED:
		return -EROFS;
	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
		return -EACCES;
	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
		return -ENOSPC;
	case HWRM_ERR_CODE_INVALID_PARAMS:
	case HWRM_ERR_CODE_INVALID_FLAGS:
	case HWRM_ERR_CODE_INVALID_ENABLES:
	case HWRM_ERR_CODE_UNSUPPORTED_TLV:
	case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
		return -EINVAL;
	case HWRM_ERR_CODE_NO_BUFFER:
		return -ENOMEM;
	case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
	case HWRM_ERR_CODE_BUSY:
		return -EAGAIN;
	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HWRM_ERR_CODE_PF_UNAVAILABLE:
		return -ENODEV;
	default:
		return -EIO;
	}
}

static struct bnxt_hwrm_wait_token *
__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst)
{
	struct bnxt_hwrm_wait_token *token;

	token = kzalloc(sizeof(*token), GFP_KERNEL);
	if (!token)
		return NULL;

	mutex_lock(&bp->hwrm_cmd_lock);

	token->dst = dst;
	token->state = BNXT_HWRM_PENDING;
	if (dst == BNXT_HWRM_CHNL_CHIMP) {
		token->seq_id = bp->hwrm_cmd_seq++;
		hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list);
	} else {
		token->seq_id = bp->hwrm_cmd_kong_seq++;
	}

	return token;
}

static void
__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token)
{
	if (token->dst == BNXT_HWRM_CHNL_CHIMP) {
		hlist_del_rcu(&token->node);
		kfree_rcu(token, rcu);
	} else {
		kfree(token);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

void
hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
{
	struct bnxt_hwrm_wait_token *token;

	rcu_read_lock();
	hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) {
		if (token->seq_id == seq_id) {
			WRITE_ONCE(token->state, state);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}

static void hwrm_req_dbg(struct bnxt *bp, struct input *req)
{
	u32 ring = le16_to_cpu(req->cmpl_ring);
	u32 type = le16_to_cpu(req->req_type);
	u32 tgt = le16_to_cpu(req->target_id);
	u32 seq = le16_to_cpu(req->seq_id);
	char opt[32] = "\n";

	if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING))
		snprintf(opt, 16, " ring %d\n", ring);

	if (unlikely(tgt != BNXT_HWRM_TARGET))
		snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);

	netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
		   type, seq, opt);
}

#define hwrm_err(bp, ctx, fmt, ...)				       \
	do {							       \
		if ((ctx)->flags & BNXT_HWRM_CTX_SILENT)	       \
			netdev_dbg((bp)->dev, fmt, __VA_ARGS__);       \
		else						       \
			netdev_err((bp)->dev, fmt, __VA_ARGS__);       \
	} while (0)

static bool hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status)
{
	if (req_type == HWRM_VER_GET)
		return false;

	if (!bp->fw_health || !bp->fw_health->status_reliable)
		return false;

	*fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
	return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status);
}

static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
	u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
	enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP;
	u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
	struct bnxt_hwrm_wait_token *token = NULL;
	struct hwrm_short_input short_input = {0};
	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
	unsigned int i, timeout, tmo_count;
	u32 *data = (u32 *)ctx->req;
	u32 msg_len = ctx->req_len;
	u32 req_type, sts;
	int rc = -EBUSY;
	u16 len = 0;
	u8 *valid;

	if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY)
		memset(ctx->resp, 0, PAGE_SIZE);

	req_type = le16_to_cpu(ctx->req->req_type);
	if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) {
		netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
			   req_type);
		goto exit;
	}

	if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
	    msg_len > bp->hwrm_max_ext_req_len) {
		rc = -E2BIG;
		goto exit;
	}

	if (bnxt_kong_hwrm_message(bp, ctx->req)) {
		dst = BNXT_HWRM_CHNL_KONG;
		bar_offset = BNXT_GRCPF_REG_KONG_COMM;
		doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
		if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n",
				   req_type);
			rc = -EINVAL;
			goto exit;
		}
	}

	token = __hwrm_acquire_token(bp, dst);
	if (!token) {
		rc = -ENOMEM;
		goto exit;
	}
	ctx->req->seq_id = cpu_to_le16(token->seq_id);

	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
		short_input.req_type = ctx->req->req_type;
		short_input.signature =
				cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = cpu_to_le16(msg_len);
		short_input.req_addr = cpu_to_le64(ctx->dma_handle);

		data = (u32 *)&short_input;
		msg_len = sizeof(short_input);

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Ensure any associated DMA buffers are written before doorbell */
	wmb();

	/* Write request msg to hwrm channel */
	__iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);

	for (i = msg_len; i < max_req_len; i += 4)
		writel(0, bp->bar0 + bar_offset + i);

	/* Ring channel doorbell */
	writel(1, bp->bar0 + doorbell_offset);

	hwrm_req_dbg(bp, ctx->req);

	if (!pci_is_enabled(bp->pdev)) {
		rc = -ENODEV;
		goto exit;
	}

	/* Limit timeout to an upper limit */
	timeout = min(ctx->timeout, bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT);
	/* convert timeout to usec */
	timeout *= 1000;

	i = 0;
	/* Short timeout for the first few iterations:
	 * number of loops = number of loops for short timeout +
	 * number of loops for standard timeout.
	 */
	tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
	timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
	tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);

	if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) {
		/* Wait until hwrm response cmpl interrupt is processed */
		while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE &&
		       i++ < tmo_count) {
			/* Abort the wait for completion if the FW health
			 * check has failed.
			 */
			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
				goto exit;
			/* on first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			} else {
				if (hwrm_wait_must_abort(bp, req_type, &sts)) {
					hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n",
						 req_type, sts);
					goto exit;
				}
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
			}
		}

		if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
			hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n",
				 req_type);
			goto exit;
		}
		len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
		valid = ((u8 *)ctx->resp) + len - 1;
	} else {
		__le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */
		int j;

		/* Check if response len is updated */
		for (i = 0; i < tmo_count; i++) {
			/* Abort the wait for completion if the FW health
			 * check has failed.
			 */
			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
				goto exit;

			if (token &&
			    READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) {
				__hwrm_release_token(bp, token);
				token = NULL;
			}

			len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
			if (len) {
				__le16 resp_seq = READ_ONCE(ctx->resp->seq_id);

				if (resp_seq == ctx->req->seq_id)
					break;
				if (resp_seq != seen_out_of_seq) {
					netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
						    le16_to_cpu(resp_seq),
						    req_type,
						    le16_to_cpu(ctx->req->seq_id));
					seen_out_of_seq = resp_seq;
				}
			}

			/* on first few passes, just barely sleep */
			if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
					     HWRM_SHORT_MAX_TIMEOUT);
			} else {
				if (hwrm_wait_must_abort(bp, req_type, &sts)) {
					hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n",
						 req_type,
						 le16_to_cpu(ctx->req->seq_id),
						 len, sts);
					goto exit;
				}
				usleep_range(HWRM_MIN_TIMEOUT,
					     HWRM_MAX_TIMEOUT);
			}
		}

		if (i >= tmo_count) {
			hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
				 hwrm_total_timeout(i), req_type,
				 le16_to_cpu(ctx->req->seq_id), len);
			goto exit;
		}

		/* Last byte of resp contains valid bit */
		valid = ((u8 *)ctx->resp) + len - 1;
		for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
			/* make sure we read from updated DMA memory */
			dma_rmb();
			if (*valid)
				break;
			if (j < 10) {
				udelay(1);
				j++;
			} else {
				usleep_range(20, 30);
				j += 20;
			}
		}

		if (j >= HWRM_VALID_BIT_DELAY_USEC) {
			hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
				 hwrm_total_timeout(i) + j, req_type,
				 le16_to_cpu(ctx->req->seq_id), len, *valid);
			goto exit;
		}
	}

	/* Zero valid bit for compatibility.  Valid bit in an older spec
	 * may become a new field in a newer spec.  We must make sure that
	 * a new field not implemented by old spec will read zero.
	 */
	*valid = 0;
	rc = le16_to_cpu(ctx->resp->error_code);
	if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT))
		netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n",
			    req_type);
	else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
		hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
			 req_type, token->seq_id, rc);
	rc = __hwrm_to_stderr(rc);
exit:
	if (token)
		__hwrm_release_token(bp, token);
	if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED)
		ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY;
	else
		__hwrm_ctx_drop(bp, ctx);
	return rc;
}

/**
 * hwrm_req_send() - Execute an HWRM command.
 * @bp: The driver context.
 * @req: A pointer to the request to send. The DMA resources associated with
 *	the request will be released (i.e. the request will be consumed) unless
 *	ownership of the request has been assumed by the caller via a call to
 *	hwrm_req_hold().
 *
 * Send an HWRM request to the device and wait for a response. The request is
 * consumed if it is not owned by the caller. This function will block until
 * the request has either completed or timed out due to an error.
 *
 * Return: A result code.
 *
 * The result is zero on success, otherwise the negative error code indicates
 * one of the following errors:
 *	E2BIG: The request was too large.
 *	EBUSY: The firmware is in a fatal state or the request timed out.
 *	EACCES: HWRM access denied.
 *	ENOSPC: HWRM resource allocation error.
 *	EINVAL: Request parameters are invalid.
 *	ENOMEM: HWRM has no buffers.
 *	EAGAIN: HWRM busy or reset in progress.
 *	EOPNOTSUPP: Invalid request type.
 *	EIO: Any other error.
 * Error handling is orthogonal to request ownership. An unowned request will
 * still be consumed on error. If the caller owns the request, then the caller
 * is responsible for releasing the resources. Otherwise, hwrm_req_send() will
 * always consume the request.
 */
int hwrm_req_send(struct bnxt *bp, void *req)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);

	if (!ctx)
		return -EINVAL;

	return __hwrm_send(bp, ctx);
}

/**
 * hwrm_req_send_silent() - A silent version of hwrm_req_send().
 * @bp: The driver context.
 * @req: The request to send without logging.
 *
 * The same as hwrm_req_send(), except that the request is silenced by
 * setting BNXT_HWRM_CTX_SILENT via hwrm_req_flags() prior to the call. This
 * version of the function is provided solely to preserve the legacy API's
 * flavor for this functionality.
 *
 * Return: A result code, see hwrm_req_send().
 */
int hwrm_req_send_silent(struct bnxt *bp, void *req)
{
	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
	return hwrm_req_send(bp, req);
}

/**
 * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
 * @bp: The driver context.
 * @req: The request for which indirect data will be associated.
 * @size: The size of the allocation.
 * @dma_handle: The bus address associated with the allocation. The HWRM API has
 *	no knowledge about the type of the request and so cannot infer how the
 *	caller intends to use the indirect data. Thus, the caller is
 *	responsible for configuring the request object appropriately to
 *	point to the associated indirect memory. Note that the DMA handle has
 *	the same definition as it does in dma_alloc_coherent(); the caller is
 *	responsible for endian conversions via cpu_to_le64() before assigning
 *	this address.
 *
 * Allocates DMA mapped memory for indirect data related to a request. The
 * lifetime of the DMA resources will be bound to that of the request (i.e.
 * they will be automatically released when the request is either consumed by
 * hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are
 * efficiently suballocated out of the request buffer space, hence the name
 * slice, while larger requests are satisfied via an underlying call to
 * dma_alloc_coherent(). Multiple suballocations are supported, but only one
 * externally mapped region is.
 *
 * Return: The kernel virtual address of the DMA mapping, or NULL on failure.
 */
void *
hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle)
{
	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
	u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE;
	struct input *input = req;
	u8 *addr, *req_addr = req;
	u32 max_offset, offset;

	if (!ctx)
		return NULL;

	max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated;
	offset = max_offset - size;
	offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN);
	addr = req_addr + offset;

	if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) {
		ctx->allocated = end - addr;
		*dma_handle = ctx->dma_handle + offset;
		return addr;
	}

	/* could not suballocate from ctx buffer, try to create a new mapping */
	if (ctx->slice_addr) {
		/* if one exists, can only be due to software bug, be loud */
		netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n",
			   (u32)le16_to_cpu(input->req_type));
		dump_stack();
		return NULL;
	}

	addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp);

	if (!addr)
		return NULL;

	ctx->slice_addr = addr;
	ctx->slice_size = size;
	ctx->slice_handle = *dma_handle;

	return addr;
}
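
/* Usage sketch (illustrative, not built): attaching indirect data to a
 * request through a DMA slice, patterned after bnxt_get_nvram_directory()
 * in bnxt_ethtool.c. The message type and its host_dest_addr field are
 * real HWRM definitions; the buffer handling is simplified. Note the
 * hold/drop pair keeping the slice alive while the response is copied out.
 */
#if 0
static int example_read_nvm_dir(struct bnxt *bp, void *out, u32 buflen)
{
	struct hwrm_nvm_get_dir_entries_input *req;
	dma_addr_t dma_handle;
	void *buf;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
	if (rc)
		return rc;

	/* slice lifetime is bound to req */
	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	/* the caller wires the slice into the request, in little endian */
	req->host_dest_addr = cpu_to_le64(dma_handle);

	hwrm_req_hold(bp, req);	/* keep buf valid across the send */
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(out, buf, buflen);
	hwrm_req_drop(bp, req);	/* releases req, resp and the slice */
	return rc;
}
#endif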