cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mcdi.c (65860B)


// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2008-2013 Solarflare Communications Inc.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/atomic.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

#define MCDI_RPC_TIMEOUT       (10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 250ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		2500
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
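/* 100 us per poll * 2500 polls = 250,000 us, so MCDI_STATUS_SLEEP_MS
 * evaluates to the 250 ms wait described in the comment above.
 */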

#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

struct efx_mcdi_async_param {
	struct list_head list;
	unsigned int cmd;
	size_t inlen;
	size_t outlen;
	bool quiet;
	efx_mcdi_async_completer *complete;
	unsigned long cookie;
	/* followed by request/response buffer */
};

static void efx_mcdi_timeout_async(struct timer_list *t);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached_out);
static bool efx_mcdi_poll_once(struct efx_nic *efx);
static void efx_mcdi_abandon(struct efx_nic *efx);

#ifdef CONFIG_SFC_MCDI_LOGGING
static bool mcdi_logging_default;
module_param(mcdi_logging_default, bool, 0644);
MODULE_PARM_DESC(mcdi_logging_default,
		 "Enable MCDI logging on newly-probed functions");
#endif

int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;
	bool already_attached;
	int rc = -ENOMEM;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		goto fail;

	mcdi = efx_mcdi(efx);
	mcdi->efx = efx;
#ifdef CONFIG_SFC_MCDI_LOGGING
	/* consuming code assumes buffer is page-sized */
	mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!mcdi->logging_buffer)
		goto fail1;
	mcdi->logging_enabled = mcdi_logging_default;
#endif
	init_waitqueue_head(&mcdi->wq);
	init_waitqueue_head(&mcdi->proxy_rx_wq);
	spin_lock_init(&mcdi->iface_lock);
	mcdi->state = MCDI_STATE_QUIESCENT;
	mcdi->mode = MCDI_MODE_POLL;
	spin_lock_init(&mcdi->async_lock);
	INIT_LIST_HEAD(&mcdi->async_list);
	timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0);

	(void) efx_mcdi_poll_reboot(efx);
	mcdi->new_epoch = true;

	/* Recover from a failed assertion before probing */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		goto fail2;

	/* Let the MC (and BMC, if this is a LOM) know that the driver
	 * is loaded. We should do this before we reset the NIC.
	 */
	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Unable to register driver with MCPU\n");
		goto fail2;
	}
	if (already_attached)
		/* Not a fatal error */
		netif_err(efx, probe, efx->net_dev,
			  "Host already registered with MCPU\n");

	if (efx->mcdi->fn_flags &
	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		efx->primary = efx;

	return 0;
fail2:
#ifdef CONFIG_SFC_MCDI_LOGGING
	free_page((unsigned long)mcdi->logging_buffer);
fail1:
#endif
	kfree(efx->mcdi);
	efx->mcdi = NULL;
fail:
	return rc;
}

void efx_mcdi_detach(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return;

	BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);

	/* Relinquish the device (back to the BMC, if this is a LOM) */
	efx_mcdi_drv_attach(efx, false, NULL);
}

void efx_mcdi_fini(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return;

#ifdef CONFIG_SFC_MCDI_LOGGING
	free_page((unsigned long)efx->mcdi->iface.logging_buffer);
#endif

	kfree(efx->mcdi);
}

static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
				  const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
#ifdef CONFIG_SFC_MCDI_LOGGING
	char *buf = mcdi->logging_buffer; /* page-sized */
#endif
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	seqno = mcdi->seqno & SEQ_MASK;
	spin_unlock_bh(&mcdi->iface_lock);

	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		hdr_len = 4;
	} else {
		/* MCDI v2 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

#ifdef CONFIG_SFC_MCDI_LOGGING
	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
		int bytes = 0;
		int i;
		/* Lengths should always be a whole number of dwords, so scream
		 * if they're not.
		 */
		WARN_ON_ONCE(hdr_len % 4);
		WARN_ON_ONCE(inlen % 4);

		/* We own the logging buffer, as only one MCDI can be in
		 * progress on a NIC at any one time.  So no need for locking.
		 */
		for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x",
					   le32_to_cpu(hdr[i].u32[0]));

		for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x",
					   le32_to_cpu(inbuf[i].u32[0]));

		netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
	}
#endif

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);

	mcdi->new_epoch = false;
}
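/* Note on the two header formats built above: a v1 header is a single dword
 * whose DATALEN field carries the payload length directly, which is why v1
 * payloads are capped at MCDI_CTL_SDU_LEN_MAX_V1 (see
 * efx_mcdi_check_supported() below). A v2 request instead sets
 * MCDI_HEADER_CODE to the escape value MC_CMD_V2_EXTN and carries the real
 * command and actual length in a second dword, allowing SDUs up to
 * MCDI_CTL_SDU_LEN_MAX_V2.
 */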

static int efx_mcdi_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:
		return 0;
#define TRANSLATE_ERROR(name)					\
	case MC_CMD_ERR_ ## name:				\
		return -name;
	TRANSLATE_ERROR(EPERM);
	TRANSLATE_ERROR(ENOENT);
	TRANSLATE_ERROR(EINTR);
	TRANSLATE_ERROR(EAGAIN);
	TRANSLATE_ERROR(EACCES);
	TRANSLATE_ERROR(EBUSY);
	TRANSLATE_ERROR(EINVAL);
	TRANSLATE_ERROR(EDEADLK);
	TRANSLATE_ERROR(ENOSYS);
	TRANSLATE_ERROR(ETIME);
	TRANSLATE_ERROR(EALREADY);
	TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
	case MC_CMD_ERR_ENOTSUP:
		return -EOPNOTSUPP;
	case MC_CMD_ERR_ALLOC_FAIL:
		return -ENOBUFS;
	case MC_CMD_ERR_MAC_EXIST:
		return -EADDRINUSE;
	default:
		return -EPROTO;
	}
}
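/* Any MC error code without a direct errno equivalent falls through to
 * -EPROTO above; callers that need the untranslated value can retrieve it
 * via the raw_rc out-parameter threaded through _efx_mcdi_rpc() below.
 */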

static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
#ifdef CONFIG_SFC_MCDI_LOGGING
	char *buf = mcdi->logging_buffer; /* page-sized */
#endif
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

#ifdef CONFIG_SFC_MCDI_LOGGING
	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
		size_t hdr_len, data_len;
		int bytes = 0;
		int i;

		WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
		hdr_len = mcdi->resp_hdr_len / 4;
		/* MCDI_DECLARE_BUF ensures that underlying buffer is padded
		 * to dword size, and the MCDI buffer is always dword size
		 */
		data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);

		/* We own the logging buffer, as only one MCDI can be in
		 * progress on a NIC at any one time.  So no need for locking.
		 */
		for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
			efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x", le32_to_cpu(hdr.u32[0]));
		}

		for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
			efx->type->mcdi_read_response(efx, &hdr,
					mcdi->resp_hdr_len + (i * 4), 4);
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x", le32_to_cpu(hdr.u32[0]));
		}

		netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
	}
#endif

	mcdi->resprc_raw = 0;
	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
		mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
	} else {
		mcdi->resprc = 0;
	}
}

static bool efx_mcdi_poll_once(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	rmb();
	if (!efx->type->mcdi_poll_response(efx))
		return false;

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	return true;
}

static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once per microsecond) for the
	 * first jiffy, because MCDI responses are generally fast. After that,
	 * back off and poll once per jiffy (approximately).
	 */
	spins = USER_TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		if (efx_mcdi_poll_once(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}

/* Test and clear MC-rebooted flag for this port/function; reset
 * software state as necessary.
 */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return 0;

	return efx->type->mcdi_poll_reboot(efx);
}

static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
{
	return cmpxchg(&mcdi->state,
		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
		MCDI_STATE_QUIESCENT;
}

static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING_SYNC.
	 */
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
		   MCDI_STATE_QUIESCENT);
}
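/* The two acquire functions above implement one arm of the interface state
 * machine used throughout this file: QUIESCENT -> RUNNING_SYNC (or
 * RUNNING_ASYNC) on acquire, RUNNING_* -> COMPLETED on completion, and back
 * to QUIESCENT in efx_mcdi_release(). The cmpxchg() makes each transition
 * atomic, so concurrent requestors serialise without holding a lock across
 * the whole request.
 */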

static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
			       MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_mode_poll() switched us back to polled
	 * completions, in which case poll for completions directly. If
	 * efx_mcdi_ev_cpl() completed the request first, then we'll just
	 * end up completing the request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}

/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
 * requester.  Return whether this was done.  Does not take any locks.
 */
static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
{
	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
	    MCDI_STATE_RUNNING_SYNC) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}

static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	if (mcdi->mode == MCDI_MODE_EVENTS) {
		struct efx_mcdi_async_param *async;
		struct efx_nic *efx = mcdi->efx;

		/* Process the asynchronous request queue */
		spin_lock_bh(&mcdi->async_lock);
		async = list_first_entry_or_null(
			&mcdi->async_list, struct efx_mcdi_async_param, list);
		if (async) {
			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
			efx_mcdi_send_request(efx, async->cmd,
					      (const efx_dword_t *)(async + 1),
					      async->inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
		spin_unlock_bh(&mcdi->async_lock);

		if (async)
			return;
	}

	mcdi->state = MCDI_STATE_QUIESCENT;
	wake_up(&mcdi->wq);
}

/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
 * asynchronous completion function, and release the interface.
 * Return whether this was done.  Must be called in bh-disabled
 * context.  Will take iface_lock and async_lock.
 */
static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
	struct efx_nic *efx = mcdi->efx;
	struct efx_mcdi_async_param *async;
	size_t hdr_len, data_len, err_len;
	efx_dword_t *outbuf;
	MCDI_DECLARE_BUF_ERR(errbuf);
	int rc;

	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
	    MCDI_STATE_RUNNING_ASYNC)
		return false;

	spin_lock(&mcdi->iface_lock);
	if (timeout) {
		/* Ensure that if the completion event arrives later,
		 * the seqno check in efx_mcdi_ev_cpl() will fail
		 */
		++mcdi->seqno;
		++mcdi->credits;
		rc = -ETIMEDOUT;
		hdr_len = 0;
		data_len = 0;
	} else {
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
	}
	spin_unlock(&mcdi->iface_lock);

	/* Stop the timer.  In case the timer function is running, we
	 * must wait for it to return so that there is no possibility
	 * of it aborting the next request.
	 */
	if (!timeout)
		del_timer_sync(&mcdi->async_timer);

	spin_lock(&mcdi->async_lock);
	async = list_first_entry(&mcdi->async_list,
				 struct efx_mcdi_async_param, list);
	list_del(&async->list);
	spin_unlock(&mcdi->async_lock);

	outbuf = (efx_dword_t *)(async + 1);
	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
				      min(async->outlen, data_len));
	if (!timeout && rc && !async->quiet) {
		err_len = min(sizeof(errbuf), data_len);
		efx->type->mcdi_read_response(efx, errbuf, hdr_len,
					      sizeof(errbuf));
		efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
				       err_len, rc);
	}

	if (async->complete)
		async->complete(efx, async->cookie, rc, outbuf,
				min(async->outlen, data_len));
	kfree(async);

	efx_mcdi_release(mcdi);

	return true;
}

static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake) {
		if (!efx_mcdi_complete_async(mcdi, false))
			(void) efx_mcdi_complete_sync(mcdi);

		/* If the interface isn't RUNNING_ASYNC or
		 * RUNNING_SYNC then we've received a duplicate
		 * completion after we've already transitioned back to
		 * QUIESCENT. [A subsequent invocation would increment
		 * seqno, so would have failed the seqno check].
		 */
	}
}

static void efx_mcdi_timeout_async(struct timer_list *t)
{
	struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);

	efx_mcdi_complete_async(mcdi, true);
}

static int
efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
{
	if (efx->type->mcdi_max_ver < 0 ||
	     (efx->type->mcdi_max_ver < 2 &&
	      cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	return 0;
}

static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx,
				      size_t hdr_len, size_t data_len,
				      u32 *proxy_handle)
{
	MCDI_DECLARE_BUF_ERR(testbuf);
	const size_t buflen = sizeof(testbuf);

	if (!proxy_handle || data_len < buflen)
		return false;

	efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen);
	if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) {
		*proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE);
		return true;
	}

	return false;
}

static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
				size_t inlen,
				efx_dword_t *outbuf, size_t outlen,
				size_t *outlen_actual, bool quiet,
				u32 *proxy_handle, int *raw_rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	MCDI_DECLARE_BUF_ERR(errbuf);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);

		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
			netif_err(efx, hw, efx->net_dev,
				  "MCDI request was completed without an event\n");
			rc = 0;
		}

		efx_mcdi_abandon(efx);

		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);
	}

	if (proxy_handle)
		*proxy_handle = 0;

	if (rc != 0) {
		if (outlen_actual)
			*outlen_actual = 0;
	} else {
		size_t hdr_len, data_len, err_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		if (raw_rc)
			*raw_rc = mcdi->resprc_raw;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		err_len = min(sizeof(errbuf), data_len);
		spin_unlock_bh(&mcdi->iface_lock);

		BUG_ON(rc > 0);

		efx->type->mcdi_read_response(efx, outbuf, hdr_len,
					      min(outlen, data_len));
		if (outlen_actual)
			*outlen_actual = data_len;

		efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);

		if (cmd == MC_CMD_REBOOT && rc == -EIO) {
			/* Don't reset if MC_CMD_REBOOT returns EIO */
		} else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC reboot detected\n");
			netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n",
				  cmd, -rc);
			if (efx->type->mcdi_reboot_detected)
				efx->type->mcdi_reboot_detected(efx);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else if (proxy_handle && (rc == -EPROTO) &&
			   efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
						     proxy_handle)) {
			mcdi->proxy_rx_status = 0;
			mcdi->proxy_rx_handle = 0;
			mcdi->state = MCDI_STATE_PROXY_WAIT;
		} else if (rc && !quiet) {
			efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
					       rc);
		}

		if (rc == -EIO || rc == -EINTR) {
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
			mcdi->new_epoch = true;
		}
	}

	if (!proxy_handle || !*proxy_handle)
		efx_mcdi_release(mcdi);
	return rc;
}

static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi)
{
	if (mcdi->state == MCDI_STATE_PROXY_WAIT) {
		/* Interrupt the proxy wait. */
		mcdi->proxy_rx_status = -EINTR;
		wake_up(&mcdi->proxy_rx_wq);
	}
}

static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
				       u32 handle, int status)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT);

	mcdi->proxy_rx_status = efx_mcdi_errno(status);
	/* Ensure the status is written before we update the handle, since the
	 * latter is used to check if we've finished.
	 */
	wmb();
	mcdi->proxy_rx_handle = handle;
	wake_up(&mcdi->proxy_rx_wq);
}

static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	/* Wait for a proxy event, or timeout. */
	rc = wait_event_timeout(mcdi->proxy_rx_wq,
				mcdi->proxy_rx_handle != 0 ||
				mcdi->proxy_rx_status == -EINTR,
				MCDI_RPC_TIMEOUT);

	if (rc <= 0) {
		netif_dbg(efx, hw, efx->net_dev,
			  "MCDI proxy timeout %d\n", handle);
		return -ETIMEDOUT;
	} else if (mcdi->proxy_rx_handle != handle) {
		netif_warn(efx, hw, efx->net_dev,
			   "MCDI proxy unexpected handle %d (expected %d)\n",
			   mcdi->proxy_rx_handle, handle);
		return -EINVAL;
	}

	return mcdi->proxy_rx_status;
}

static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
			 const efx_dword_t *inbuf, size_t inlen,
			 efx_dword_t *outbuf, size_t outlen,
			 size_t *outlen_actual, bool quiet, int *raw_rc)
{
	u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */
	int rc;

	if (inbuf && inlen && (inbuf == outbuf)) {
		/* The input buffer can't be aliased with the output. */
		WARN_ON(1);
		return -EINVAL;
	}

	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
	if (rc)
		return rc;

	rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				  outlen_actual, quiet, &proxy_handle, raw_rc);

	if (proxy_handle) {
		/* Handle proxy authorisation. This allows approval of MCDI
		 * operations to be delegated to the admin function, allowing
		 * fine control over (eg) multicast subscriptions.
		 */
		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

		netif_dbg(efx, hw, efx->net_dev,
			  "MCDI waiting for proxy auth %d\n",
			  proxy_handle);
		rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet);

		if (rc == 0) {
			netif_dbg(efx, hw, efx->net_dev,
				  "MCDI proxy retry %d\n", proxy_handle);

			/* We now retry the original request. */
			mcdi->state = MCDI_STATE_RUNNING_SYNC;
			efx_mcdi_send_request(efx, cmd, inbuf, inlen);

			rc = _efx_mcdi_rpc_finish(efx, cmd, inlen,
						  outbuf, outlen, outlen_actual,
						  quiet, NULL, raw_rc);
		} else {
			netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
				       "MC command 0x%x failed after proxy auth rc=%d\n",
				       cmd, rc);

			if (rc == -EINTR || rc == -EIO)
				efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
			efx_mcdi_release(mcdi);
		}
	}

	return rc;
}
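/* To summarise the proxy path above: the MC answers with
 * MC_CMD_ERR_PROXY_PENDING and a handle, the interface parks in
 * MCDI_STATE_PROXY_WAIT until efx_mcdi_ev_proxy_response() delivers the
 * admin function's verdict for that handle, and on success the original
 * request is simply sent again.
 */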

static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd,
				   const efx_dword_t *inbuf, size_t inlen,
				   efx_dword_t *outbuf, size_t outlen,
				   size_t *outlen_actual, bool quiet)
{
	int raw_rc = 0;
	int rc;

	rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
			   outbuf, outlen, outlen_actual, true, &raw_rc);

	if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
	    efx->type->is_vf) {
		/* If the EVB port isn't available within a VF this may
		 * mean the PF is still bringing the switch up. We should
		 * retry our request shortly.
		 */
		unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT;
		unsigned int delay_us = 10000;

		netif_dbg(efx, hw, efx->net_dev,
			  "%s: NO_EVB_PORT; will retry request\n",
			  __func__);

		do {
			usleep_range(delay_us, delay_us + 10000);
			rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
					   outbuf, outlen, outlen_actual,
					   true, &raw_rc);
			if (delay_us < 100000)
				delay_us <<= 1;
		} while ((rc == -EPROTO) &&
			 (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
			 time_before(jiffies, abort_time));
	}

	if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO))
		efx_mcdi_display_error(efx, cmd, inlen,
				       outbuf, outlen, rc);

	return rc;
}
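/* The retry loop above backs off exponentially: delay_us starts at 10 ms and
 * doubles after each NO_EVB_PORT failure while it is still below 100 ms, so
 * the sleeps run roughly 10, 20, 40, 80 and then 160 ms apart until
 * MCDI_RPC_TIMEOUT has elapsed since the first retry was scheduled.
 */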

/**
 * efx_mcdi_rpc - Issue an MCDI command and wait for completion
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes.  Must be a multiple
 *	of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
 * @outbuf: Response buffer.  May be %NULL if @outlen is 0.
 * @outlen: Length of response buffer, in bytes.  If the actual
 *	response is longer than @outlen & ~3, it will be truncated
 *	to that length.
 * @outlen_actual: Pointer through which to return the actual response
 *	length.  May be %NULL if this is not needed.
 *
 * This function may sleep and therefore must be called in an appropriate
 * context.
 *
 * Return: A negative error code, or zero if successful.  The error
 *	code may come from the MCDI response or may indicate a failure
 *	to communicate with the MC.  In the former case, the response
 *	will still be copied to @outbuf and *@outlen_actual will be
 *	set accordingly.  In the latter case, *@outlen_actual will be
 *	set to zero.
 */
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const efx_dword_t *inbuf, size_t inlen,
		 efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
				       outlen_actual, false);
}
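/* For illustration only (not part of the driver): a minimal sketch of how a
 * caller typically drives efx_mcdi_rpc(), modelled on the request functions
 * later in this file; the command and field names follow the real
 * efx_mcdi_log_ctrl() below, and error handling is pared to the minimum.
 */
#if 0	/* usage sketch, not built */
static int example_mcdi_log_ctrl(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);

	/* Build the request payload with the MCDI accessor macros */
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST,
		       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, 0);

	/* No response payload is expected for this command */
	return efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}
#endif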

/* Normally, on receiving an error code in the MCDI response,
 * efx_mcdi_rpc will log an error message containing (among other
 * things) the raw error code, by means of efx_mcdi_display_error.
 * This _quiet version suppresses that; if the caller wishes to log
 * the error conditionally on the return code, it should call this
 * function and is then responsible for calling efx_mcdi_display_error
 * as needed.
 */
int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen,
		       efx_dword_t *outbuf, size_t outlen,
		       size_t *outlen_actual)
{
	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
				       outlen_actual, true);
}

int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	if (efx->mc_bist_for_other_fn)
		return -ENETDOWN;

	if (mcdi->mode == MCDI_MODE_FAIL)
		return -ENETDOWN;

	efx_mcdi_acquire_sync(mcdi);
	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
	return 0;
}

static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
			       const efx_dword_t *inbuf, size_t inlen,
			       size_t outlen,
			       efx_mcdi_async_completer *complete,
			       unsigned long cookie, bool quiet)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	struct efx_mcdi_async_param *async;
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	if (efx->mc_bist_for_other_fn)
		return -ENETDOWN;

	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
			GFP_ATOMIC);
	if (!async)
		return -ENOMEM;

	async->cmd = cmd;
	async->inlen = inlen;
	async->outlen = outlen;
	async->quiet = quiet;
	async->complete = complete;
	async->cookie = cookie;
	memcpy(async + 1, inbuf, inlen);

	spin_lock_bh(&mcdi->async_lock);

	if (mcdi->mode == MCDI_MODE_EVENTS) {
		list_add_tail(&async->list, &mcdi->async_list);

		/* If this is at the front of the queue, try to start it
		 * immediately
		 */
		if (mcdi->async_list.next == &async->list &&
		    efx_mcdi_acquire_async(mcdi)) {
			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
	} else {
		kfree(async);
		rc = -ENETDOWN;
	}

	spin_unlock_bh(&mcdi->async_lock);

	return rc;
}

/**
 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes
 * @outlen: Length to allocate for response buffer, in bytes
 * @complete: Function to be called on completion or cancellation.
 * @cookie: Arbitrary value to be passed to @complete.
 *
 * This function does not sleep and therefore may be called in atomic
 * context.  It will fail if event queues are disabled or if MCDI
 * event completions have been disabled due to an error.
 *
 * If it succeeds, the @complete function will be called exactly once
 * in atomic context, when one of the following occurs:
 * (a) the completion event is received (in NAPI context)
 * (b) event queues are disabled (in the process that disables them)
 * (c) the request times-out (in timer context)
 */
int
efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
		   efx_mcdi_async_completer *complete, unsigned long cookie)
{
	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
				   cookie, false);
}
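/* For illustration only: a sketch of an efx_mcdi_async_completer, matching
 * the way async->complete is invoked from efx_mcdi_complete_async() and
 * efx_mcdi_flush_async(). On cancellation rc is -ENETDOWN and outbuf is
 * NULL, so the response must not be touched in that case.
 */
#if 0	/* usage sketch, not built */
static void example_mcdi_complete(struct efx_nic *efx, unsigned long cookie,
				  int rc, efx_dword_t *outbuf,
				  size_t outlen_actual)
{
	if (rc)
		return;	/* failed, timed out or cancelled */

	/* At most the outlen passed to efx_mcdi_rpc_async() is available */
	netif_dbg(efx, hw, efx->net_dev, "async MCDI done, cookie %lu\n",
		  cookie);
}
#endif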

int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
			     const efx_dword_t *inbuf, size_t inlen,
			     size_t outlen, efx_mcdi_async_completer *complete,
			     unsigned long cookie)
{
	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
				   cookie, true);
}

int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, false, NULL, NULL);
}

int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
			      efx_dword_t *outbuf, size_t outlen,
			      size_t *outlen_actual)
{
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, true, NULL, NULL);
}

void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
			    size_t inlen, efx_dword_t *outbuf,
			    size_t outlen, int rc)
{
	int code = 0, err_arg = 0;

	if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
		code = MCDI_DWORD(outbuf, ERR_CODE);
	if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
		err_arg = MCDI_DWORD(outbuf, ERR_ARG);
	netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
		       "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
		       cmd, inlen, rc, code, err_arg);
}

/* Switch to polled MCDI completions.  This can be called in various
 * error conditions with various locks held, so it must be lockless.
 * Caller is responsible for flushing asynchronous requests later.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	/* If already in polling mode, nothing to do.
	 * If in fail-fast state, don't switch to polled completion.
	 * FLR recovery will do that later.
	 */
	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete_sync() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete_sync(mcdi);
}

/* Flush any running or queued asynchronous requests, after event processing
 * is stopped
 */
void efx_mcdi_flush_async(struct efx_nic *efx)
{
	struct efx_mcdi_async_param *async, *next;
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	/* We must be in poll or fail mode so no more requests can be queued */
	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);

	del_timer_sync(&mcdi->async_timer);

	/* If a request is still running, make sure we give the MC
	 * time to complete it so that the response won't overwrite our
	 * next request.
	 */
	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
		efx_mcdi_poll(efx);
		mcdi->state = MCDI_STATE_QUIESCENT;
	}

	/* Nothing else will access the async list now, so it is safe
	 * to walk it without holding async_lock.  If we hold it while
	 * calling a completer then lockdep may warn that we have
	 * acquired locks in the wrong order.
	 */
	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
		if (async->complete)
			async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
		list_del(&async->list);
		kfree(async);
	}
}

void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	/* If already in event completion mode, nothing to do.
	 * If in fail-fast state, don't switch to event completion.  FLR
	 * recovery will do that later.
	 */
	if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire_sync(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * If there is an outstanding asynchronous request, we can't
	 * complete it now (efx_mcdi_complete_async() would deadlock).  The
	 * reset process will take care of this.
	 *
	 * There's a race here with efx_mcdi_send_request(), because
	 * we might receive a REBOOT event *before* the request has
	 * been copied out. In polled mode (during startup) this is
	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
	 * event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI
	 * request. Did the mc reboot before or after the copyout? The
	 * best we can always do is just return failure.
	 *
	 * If there is an outstanding proxy response expected it is not going
	 * to arrive. We should thus abort it.
	 */
	spin_lock(&mcdi->iface_lock);
	efx_mcdi_proxy_abort(mcdi);

	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			rc = efx_mcdi_poll_reboot(efx);
			if (rc)
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}

		/* On EF10, a CODE_MC_REBOOT event can be received without the
		 * reboot detection in efx_mcdi_poll_reboot() being triggered.
		 * If zero was returned from the final call to
		 * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
		 * MC has definitely rebooted so prepare for the reset.
		 */
		if (!rc && efx->type->mcdi_reboot_detected)
			efx->type->mcdi_reboot_detected(efx);

		mcdi->new_epoch = true;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
	}

	spin_unlock(&mcdi->iface_lock);
}

/* The MC is going down into BIST mode. Set the BIST flag to block
 * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
 * (which doesn't actually execute a reset, it waits for the controlling
 * function to reset it).
 */
static void efx_mcdi_ev_bist(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	spin_lock(&mcdi->iface_lock);
	efx->mc_bist_for_other_fn = true;
	efx_mcdi_proxy_abort(mcdi);

	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = -EIO;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	}
	mcdi->new_epoch = true;
	efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
	spin_unlock(&mcdi->iface_lock);
}

/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
 * to recover.
 */
static void efx_mcdi_abandon(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
		return; /* it had already been done */
	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
}
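/* Once in MCDI_MODE_FAIL, efx_mcdi_rpc_start() rejects every new request
 * with -ENETDOWN, and efx_mcdi_mode_poll()/efx_mcdi_mode_event() refuse to
 * leave the state, so recovery happens only through the scheduled
 * MCDI-timeout reset.
 */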

static void efx_handle_drain_event(struct efx_nic *efx)
{
	if (atomic_dec_and_test(&efx->active_queues))
		wake_up(&efx->flush_wq);

	WARN_ON(atomic_read(&efx->active_queues) < 0);
}

/* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_dbg(efx, hw, efx->net_dev,
			  "MC Scheduler alert (0x%x)\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
	case MCDI_EVENT_CODE_MC_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MC_BIST:
		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
		efx_mcdi_ev_bist(efx);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily.  We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		if (efx->type->sriov_flr)
			efx->type->sriov_flr(efx,
					     MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;
	case MCDI_EVENT_CODE_PTP_TIME:
		efx_time_sync_event(channel, event);
		break;
	case MCDI_EVENT_CODE_TX_FLUSH:
	case MCDI_EVENT_CODE_RX_FLUSH:
		/* Two flush events will be sent: one to the same event
		 * queue as completions, and one to event queue 0.
		 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
		 * flag will be set, and we should ignore the event
		 * because we want to wait for all completions.
		 */
		BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
			     MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
		if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
			efx_handle_drain_event(efx);
		break;
	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		efx_mcdi_ev_proxy_response(efx,
				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "Unknown MCDI event " EFX_QWORD_FMT "\n",
			  EFX_QWORD_VAL(*event));
	}
}
   1403
   1404/**************************************************************************
   1405 *
   1406 * Specific request functions
   1407 *
   1408 **************************************************************************
   1409 */
   1410
   1411void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
   1412{
   1413	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
   1414	size_t outlength;
   1415	const __le16 *ver_words;
   1416	size_t offset;
   1417	int rc;
   1418
   1419	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
   1420	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
   1421			  outbuf, sizeof(outbuf), &outlength);
   1422	if (rc)
   1423		goto fail;
   1424	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
   1425		rc = -EIO;
   1426		goto fail;
   1427	}
   1428
   1429	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
   1430	offset = scnprintf(buf, len, "%u.%u.%u.%u",
   1431			   le16_to_cpu(ver_words[0]),
   1432			   le16_to_cpu(ver_words[1]),
   1433			   le16_to_cpu(ver_words[2]),
   1434			   le16_to_cpu(ver_words[3]));
   1435
   1436	if (efx->type->print_additional_fwver)
   1437		offset += efx->type->print_additional_fwver(efx, buf + offset,
   1438							    len - offset);
   1439
   1440	/* It's theoretically possible for the string to exceed 31
   1441	 * characters, though in practice the first three version
   1442	 * components are short enough that this doesn't happen.
   1443	 */
   1444	if (WARN_ON(offset >= len))
   1445		buf[0] = 0;
   1446
   1447	return;
   1448
   1449fail:
   1450	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   1451	buf[0] = 0;
   1452}
   1453
   1454static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
   1455			       bool *was_attached)
   1456{
   1457	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
   1458	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
   1459	size_t outlen;
   1460	int rc;
   1461
   1462	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
   1463		       driver_operating ? 1 : 0);
   1464	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
   1465	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
   1466
   1467	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
   1468				outbuf, sizeof(outbuf), &outlen);
   1469	/* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
   1470	 * specified will fail with EPERM, and we have to tell the MC we don't
   1471	 * care what firmware we get.
   1472	 */
   1473	if (rc == -EPERM) {
   1474		netif_dbg(efx, probe, efx->net_dev,
   1475			  "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
   1476		MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
   1477			       MC_CMD_FW_DONT_CARE);
   1478		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
   1479					sizeof(inbuf), outbuf, sizeof(outbuf),
   1480					&outlen);
   1481	}
   1482	if (rc) {
   1483		efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
   1484				       outbuf, outlen, rc);
   1485		goto fail;
   1486	}
   1487	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
   1488		rc = -EIO;
   1489		goto fail;
   1490	}
   1491
   1492	if (driver_operating) {
   1493		if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
   1494			efx->mcdi->fn_flags =
   1495				MCDI_DWORD(outbuf,
   1496					   DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
   1497		} else {
   1498			/* Synthesise flags for Siena */
   1499			efx->mcdi->fn_flags =
   1500				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
   1501				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
   1502				(efx_port_num(efx) == 0) <<
   1503				MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
   1504		}
   1505	}
   1506
   1507	/* We currently assume we have control of the external link
   1508	 * and are completely trusted by firmware.  Abort probing
   1509	 * if that's not true for this function.
   1510	 */
   1511
   1512	if (was_attached != NULL)
   1513		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
   1514	return 0;
   1515
   1516fail:
   1517	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   1518	return rc;
   1519}
   1520
   1521int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
   1522			   u16 *fw_subtype_list, u32 *capabilities)
   1523{
   1524	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
   1525	size_t outlen, i;
   1526	int port_num = efx_port_num(efx);
   1527	int rc;
   1528
   1529	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
   1530	/* we need __aligned(2) for ether_addr_copy */
   1531	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
   1532	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
   1533
   1534	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
   1535			  outbuf, sizeof(outbuf), &outlen);
   1536	if (rc)
   1537		goto fail;
   1538
   1539	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
   1540		rc = -EIO;
   1541		goto fail;
   1542	}
   1543
   1544	if (mac_address)
   1545		ether_addr_copy(mac_address,
   1546				port_num ?
   1547				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
   1548				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
   1549	if (fw_subtype_list) {
   1550		for (i = 0;
   1551		     i < MCDI_VAR_ARRAY_LEN(outlen,
   1552					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
   1553		     i++)
   1554			fw_subtype_list[i] = MCDI_ARRAY_WORD(
   1555				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
   1556		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
   1557			fw_subtype_list[i] = 0;
   1558	}
   1559	if (capabilities) {
   1560		if (port_num)
   1561			*capabilities = MCDI_DWORD(outbuf,
   1562					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
   1563		else
   1564			*capabilities = MCDI_DWORD(outbuf,
   1565					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
   1566	}
   1567
   1568	return 0;
   1569
   1570fail:
   1571	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
   1572		  __func__, rc, (int)outlen);
   1573
   1574	return rc;
   1575}
   1576
   1577int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
   1578{
   1579	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
   1580	u32 dest = 0;
   1581	int rc;
   1582
   1583	if (uart)
   1584		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
   1585	if (evq)
   1586		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
   1587
   1588	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
   1589	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
   1590
   1591	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
   1592
   1593	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
   1594			  NULL, 0, NULL);
   1595	return rc;
   1596}
   1597
   1598int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
   1599{
   1600	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
   1601	size_t outlen;
   1602	int rc;
   1603
   1604	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
   1605
   1606	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
   1607			  outbuf, sizeof(outbuf), &outlen);
   1608	if (rc)
   1609		goto fail;
   1610	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
   1611		rc = -EIO;
   1612		goto fail;
   1613	}
   1614
   1615	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
   1616	return 0;
   1617
   1618fail:
   1619	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
   1620		  __func__, rc);
   1621	return rc;
   1622}
   1623
    1624	/* Find the partition types present, using the newer NVRAM_PARTITIONS MCDI command. */
   1625static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number,
   1626				    u32 *nvram_types)
   1627{
   1628	efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
   1629				      GFP_KERNEL);
   1630	size_t outlen;
   1631	int rc;
   1632
   1633	if (!outbuf)
   1634		return -ENOMEM;
   1635
   1636	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
   1637
   1638	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
   1639			  outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen);
   1640	if (rc)
   1641		goto fail;
   1642
   1643	*number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
   1644
   1645	memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID),
   1646	       *number * sizeof(u32));
   1647
   1648fail:
   1649	kfree(outbuf);
   1650	return rc;
   1651}
   1652
   1653int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
   1654			size_t *size_out, size_t *erase_size_out,
   1655			bool *protected_out)
   1656{
   1657	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
   1658	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
   1659	size_t outlen;
   1660	int rc;
   1661
   1662	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
   1663
   1664	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
   1665			  outbuf, sizeof(outbuf), &outlen);
   1666	if (rc)
   1667		goto fail;
   1668	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
   1669		rc = -EIO;
   1670		goto fail;
   1671	}
   1672
   1673	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
   1674	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
   1675	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
   1676				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
   1677	return 0;
   1678
   1679fail:
   1680	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   1681	return rc;
   1682}
   1683
   1684static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
   1685{
   1686	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
   1687	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
   1688	int rc;
   1689
   1690	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
   1691
   1692	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
   1693			  outbuf, sizeof(outbuf), NULL);
   1694	if (rc)
   1695		return rc;
   1696
   1697	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
   1698	case MC_CMD_NVRAM_TEST_PASS:
   1699	case MC_CMD_NVRAM_TEST_NOTSUPP:
   1700		return 0;
   1701	default:
   1702		return -EIO;
   1703	}
   1704}
   1705
    1706	/* Test NVRAM partitions, enumerating them via the newer NVRAM_PARTITIONS-based lookup scheme */
   1707int efx_new_mcdi_nvram_test_all(struct efx_nic *efx)
   1708{
   1709	u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
   1710				   GFP_KERNEL);
   1711	unsigned int number;
   1712	int rc, i;
   1713
   1714	if (!nvram_types)
   1715		return -ENOMEM;
   1716
   1717	rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types);
   1718	if (rc)
   1719		goto fail;
   1720
    1721	/* Require at least one test; rc stays -EAGAIN if none ran */
   1722	rc = -EAGAIN;
   1723
   1724	for (i = 0; i < number; i++) {
   1725		if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP ||
   1726		    nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG)
   1727			continue;
   1728
   1729		rc = efx_mcdi_nvram_test(efx, nvram_types[i]);
   1730		if (rc)
   1731			goto fail;
   1732	}
   1733
   1734fail:
   1735	kfree(nvram_types);
   1736	return rc;
   1737}
   1738
   1739int efx_mcdi_nvram_test_all(struct efx_nic *efx)
   1740{
   1741	u32 nvram_types;
   1742	unsigned int type;
   1743	int rc;
   1744
   1745	rc = efx_mcdi_nvram_types(efx, &nvram_types);
   1746	if (rc)
   1747		goto fail1;
   1748
   1749	type = 0;
   1750	while (nvram_types != 0) {
   1751		if (nvram_types & 1) {
   1752			rc = efx_mcdi_nvram_test(efx, type);
   1753			if (rc)
   1754				goto fail2;
   1755		}
   1756		type++;
   1757		nvram_types >>= 1;
   1758	}
   1759
   1760	return 0;
   1761
   1762fail2:
   1763	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
   1764		  __func__, type);
   1765fail1:
   1766	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   1767	return rc;
   1768}
   1769
   1770/* Returns 1 if an assertion was read, 0 if no assertion had fired,
   1771 * negative on error.
   1772 */
   1773static int efx_mcdi_read_assertion(struct efx_nic *efx)
   1774{
   1775	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
   1776	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
   1777	unsigned int flags, index;
   1778	const char *reason;
   1779	size_t outlen;
   1780	int retry;
   1781	int rc;
   1782
    1783	/* Attempt to read any stored assertion state before we reboot
    1784	 * the MC firmware out of the assertion handler.  Retry twice:
    1785	 * once because a boot-time assertion might cause this command
    1786	 * to fail with EINTR, and once more because GET_ASSERTS can
    1787	 * race with MC_CMD_REBOOT running on the other port. */
   1788	retry = 2;
   1789	do {
   1790		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
   1791		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
   1792					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
   1793					outbuf, sizeof(outbuf), &outlen);
   1794		if (rc == -EPERM)
   1795			return 0;
   1796	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
   1797
   1798	if (rc) {
   1799		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
   1800				       MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
   1801				       outlen, rc);
   1802		return rc;
   1803	}
   1804	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
   1805		return -EIO;
   1806
   1807	/* Print out any recorded assertion state */
   1808	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
   1809	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
   1810		return 0;
   1811
   1812	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
   1813		? "system-level assertion"
   1814		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
   1815		? "thread-level assertion"
   1816		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
   1817		? "watchdog reset"
   1818		: "unknown assertion";
   1819	netif_err(efx, hw, efx->net_dev,
   1820		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
   1821		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
   1822		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
   1823
   1824	/* Print out the registers */
   1825	for (index = 0;
   1826	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
   1827	     index++)
   1828		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
   1829			  1 + index,
   1830			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
   1831					   index));
   1832
   1833	return 1;
   1834}
   1835
   1836static int efx_mcdi_exit_assertion(struct efx_nic *efx)
   1837{
   1838	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
   1839	int rc;
   1840
    1841	/* If the MC is running debug firmware, it might now be
    1842	 * waiting for a debugger to attach, but we just want it to
    1843	 * reboot.  We set a flag that makes the command a no-op if
    1844	 * the MC has already rebooted.
    1845	 * The MCDI command will thus return either 0 or -EIO.
    1846	 */
   1847	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
   1848	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
   1849		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
   1850	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
   1851				NULL, 0, NULL);
   1852	if (rc == -EIO)
   1853		rc = 0;
   1854	if (rc)
   1855		efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
   1856				       NULL, 0, rc);
   1857	return rc;
   1858}
   1859
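/* Read out and clear any stored assertion state and, if an assertion had
 * fired, reboot the MC out of its assertion handler.  Returns 0 on success
 * (including when no assertion had fired) or a negative error code.
 */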
   1860int efx_mcdi_handle_assertion(struct efx_nic *efx)
   1861{
   1862	int rc;
   1863
   1864	rc = efx_mcdi_read_assertion(efx);
   1865	if (rc <= 0)
   1866		return rc;
   1867
   1868	return efx_mcdi_exit_assertion(efx);
   1869}
   1870
   1871int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
   1872{
   1873	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
   1874
   1875	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
   1876	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
   1877	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
   1878
   1879	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
   1880
   1881	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
   1882
   1883	return efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL);
   1884}
   1885
   1886static int efx_mcdi_reset_func(struct efx_nic *efx)
   1887{
   1888	MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
   1889	int rc;
   1890
   1891	BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
   1892	MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
   1893			      ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
   1894	rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
   1895			  NULL, 0, NULL);
   1896	return rc;
   1897}
   1898
   1899static int efx_mcdi_reset_mc(struct efx_nic *efx)
   1900{
   1901	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
   1902	int rc;
   1903
   1904	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
   1905	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
   1906	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
   1907			  NULL, 0, NULL);
    1908	/* White is black, and up is down: a reboot that actually happens
        	 * tears down the MCDI transport, so -EIO here means success,
        	 * while a clean completion means the MC did not reboot.
        	 */
   1909	if (rc == -EIO)
   1910		return 0;
   1911	if (rc == 0)
   1912		rc = -EIO;
   1913	return rc;
   1914}
   1915
   1916enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
   1917{
   1918	return RESET_TYPE_RECOVER_OR_ALL;
   1919}
   1920
   1921int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
   1922{
   1923	int rc;
   1924
    1925	/* If MCDI is down, we can't run efx_mcdi_handle_assertion() */
   1926	if (method == RESET_TYPE_MCDI_TIMEOUT) {
   1927		rc = pci_reset_function(efx->pci_dev);
   1928		if (rc)
   1929			return rc;
   1930		/* Re-enable polled MCDI completion */
   1931		if (efx->mcdi) {
   1932			struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
   1933			mcdi->mode = MCDI_MODE_POLL;
   1934		}
   1935		return 0;
   1936	}
   1937
   1938	/* Recover from a failed assertion pre-reset */
   1939	rc = efx_mcdi_handle_assertion(efx);
   1940	if (rc)
   1941		return rc;
   1942
   1943	if (method == RESET_TYPE_DATAPATH)
   1944		return 0;
   1945	else if (method == RESET_TYPE_WORLD)
   1946		return efx_mcdi_reset_mc(efx);
   1947	else
   1948		return efx_mcdi_reset_func(efx);
   1949}
   1950
   1951static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
   1952				   const u8 *mac, int *id_out)
   1953{
   1954	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
   1955	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
   1956	size_t outlen;
   1957	int rc;
   1958
   1959	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
   1960	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
   1961		       MC_CMD_FILTER_MODE_SIMPLE);
   1962	ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
   1963
   1964	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
   1965			  outbuf, sizeof(outbuf), &outlen);
   1966	if (rc)
   1967		goto fail;
   1968
   1969	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
   1970		rc = -EIO;
   1971		goto fail;
   1972	}
   1973
   1974	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
   1975
   1976	return 0;
   1977
   1978fail:
   1979	*id_out = -1;
   1980	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   1981	return rc;
   1982
   1983}
   1984
   1985
   1986int
   1987efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,  const u8 *mac, int *id_out)
   1988{
   1989	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
   1990}
   1991
   1992
   1993int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
   1994{
   1995	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
   1996	size_t outlen;
   1997	int rc;
   1998
   1999	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
   2000			  outbuf, sizeof(outbuf), &outlen);
   2001	if (rc)
   2002		goto fail;
   2003
   2004	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
   2005		rc = -EIO;
   2006		goto fail;
   2007	}
   2008
   2009	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
   2010
   2011	return 0;
   2012
   2013fail:
   2014	*id_out = -1;
   2015	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   2016	return rc;
   2017}
   2018
   2019
   2020int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
   2021{
   2022	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
   2023	int rc;
   2024
   2025	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
   2026
   2027	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
   2028			  NULL, 0, NULL);
   2029	return rc;
   2030}
   2031
   2032int efx_mcdi_flush_rxqs(struct efx_nic *efx)
   2033{
   2034	struct efx_channel *channel;
   2035	struct efx_rx_queue *rx_queue;
   2036	MCDI_DECLARE_BUF(inbuf,
   2037			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
   2038	int rc, count;
   2039
   2040	BUILD_BUG_ON(EFX_MAX_CHANNELS >
   2041		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
   2042
   2043	count = 0;
   2044	efx_for_each_channel(channel, efx) {
   2045		efx_for_each_channel_rx_queue(rx_queue, channel) {
   2046			if (rx_queue->flush_pending) {
   2047				rx_queue->flush_pending = false;
   2048				atomic_dec(&efx->rxq_flush_pending);
   2049				MCDI_SET_ARRAY_DWORD(
   2050					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
   2051					count, efx_rx_queue_index(rx_queue));
   2052				count++;
   2053			}
   2054		}
   2055	}
   2056
   2057	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
   2058			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
   2059	WARN_ON(rc < 0);
   2060
   2061	return rc;
   2062}
   2063
   2064int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
   2065{
   2066	int rc;
   2067
   2068	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
   2069	return rc;
   2070}
   2071
   2072int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
   2073			    unsigned int *flags)
   2074{
   2075	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
   2076	MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
   2077	size_t outlen;
   2078	int rc;
   2079
   2080	BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
   2081	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
   2082	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
   2083	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
   2084			  outbuf, sizeof(outbuf), &outlen);
   2085	if (rc)
   2086		return rc;
   2087
   2088	if (!flags)
   2089		return 0;
   2090
   2091	if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
   2092		*flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
   2093	else
   2094		*flags = 0;
   2095
   2096	return 0;
   2097}
   2098
   2099int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
   2100			     unsigned int *enabled_out)
   2101{
   2102	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
   2103	size_t outlen;
   2104	int rc;
   2105
   2106	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
   2107			  outbuf, sizeof(outbuf), &outlen);
   2108	if (rc)
   2109		goto fail;
   2110
   2111	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
   2112		rc = -EIO;
   2113		goto fail;
   2114	}
   2115
   2116	if (impl_out)
   2117		*impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
   2118
   2119	if (enabled_out)
   2120		*enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
   2121
   2122	return 0;
   2123
   2124fail:
   2125	/* Older firmware lacks GET_WORKAROUNDS and this isn't especially
   2126	 * terrifying.  The call site will have to deal with it though.
   2127	 */
   2128	netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err,
   2129		       "%s: failed rc=%d\n", __func__, rc);
   2130	return rc;
   2131}
   2132
   2133#ifdef CONFIG_SFC_MTD
   2134
   2135#define EFX_MCDI_NVRAM_LEN_MAX 128
   2136
   2137static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
   2138{
   2139	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
   2140	int rc;
   2141
   2142	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
   2143	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS,
   2144			      NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT,
   2145			      1);
   2146
   2147	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
   2148
   2149	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
   2150			  NULL, 0, NULL);
   2151
   2152	return rc;
   2153}
   2154
   2155static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
   2156			       loff_t offset, u8 *buffer, size_t length)
   2157{
   2158	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);
   2159	MCDI_DECLARE_BUF(outbuf,
   2160			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
   2161	size_t outlen;
   2162	int rc;
   2163
   2164	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
   2165	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
   2166	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
   2167	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
   2168		       MC_CMD_NVRAM_READ_IN_V2_DEFAULT);
   2169
   2170	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
   2171			  outbuf, sizeof(outbuf), &outlen);
   2172	if (rc)
   2173		return rc;
   2174
   2175	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
   2176	return 0;
   2177}
   2178
   2179static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
   2180				loff_t offset, const u8 *buffer, size_t length)
   2181{
   2182	MCDI_DECLARE_BUF(inbuf,
   2183			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
   2184	int rc;
   2185
   2186	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
   2187	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
   2188	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
   2189	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
   2190
   2191	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
   2192
   2193	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
   2194			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
   2195			  NULL, 0, NULL);
   2196	return rc;
   2197}
   2198
   2199static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
   2200				loff_t offset, size_t length)
   2201{
   2202	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
   2203	int rc;
   2204
   2205	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
   2206	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
   2207	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
   2208
   2209	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
   2210
   2211	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
   2212			  NULL, 0, NULL);
   2213	return rc;
   2214}
   2215
   2216static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
   2217{
   2218	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
   2219	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
   2220	size_t outlen;
   2221	int rc, rc2;
   2222
   2223	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
   2224	/* Always set this flag. Old firmware ignores it */
   2225	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
   2226			      NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
   2227			      1);
   2228
   2229	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
   2230			  outbuf, sizeof(outbuf), &outlen);
   2231	if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
   2232		rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
   2233		if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
   2234			netif_err(efx, drv, efx->net_dev,
   2235				  "NVRAM update failed verification with code 0x%x\n",
   2236				  rc2);
   2237		switch (rc2) {
   2238		case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
   2239			break;
   2240		case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
   2241		case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
   2242		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
   2243		case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED:
   2244		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED:
   2245			rc = -EIO;
   2246			break;
   2247		case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
   2248		case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
   2249			rc = -EINVAL;
   2250			break;
   2251		case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
   2252		case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
   2253		case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
   2254			rc = -EPERM;
   2255			break;
   2256		default:
   2257			netif_err(efx, drv, efx->net_dev,
   2258				  "Unknown response to NVRAM_UPDATE_FINISH\n");
   2259			rc = -EIO;
   2260		}
   2261	}
   2262
   2263	return rc;
   2264}
   2265
   2266int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
   2267		      size_t len, size_t *retlen, u8 *buffer)
   2268{
   2269	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
   2270	struct efx_nic *efx = mtd->priv;
   2271	loff_t offset = start;
   2272	loff_t end = min_t(loff_t, start + len, mtd->size);
   2273	size_t chunk;
   2274	int rc = 0;
   2275
   2276	while (offset < end) {
   2277		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
   2278		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
   2279					 buffer, chunk);
   2280		if (rc)
   2281			goto out;
   2282		offset += chunk;
   2283		buffer += chunk;
   2284	}
   2285out:
   2286	*retlen = offset - start;
   2287	return rc;
   2288}
   2289
   2290int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
   2291{
   2292	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
   2293	struct efx_nic *efx = mtd->priv;
   2294	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
   2295	loff_t end = min_t(loff_t, start + len, mtd->size);
   2296	size_t chunk = part->common.mtd.erasesize;
   2297	int rc = 0;
   2298
   2299	if (!part->updating) {
   2300		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
   2301		if (rc)
   2302			goto out;
   2303		part->updating = true;
   2304	}
   2305
    2306	/* The MCDI interface can in fact erase multiple blocks at once,
    2307	 * but erasing may be slow, so we make multiple calls here to
    2308	 * avoid tripping the MCDI RPC timeout. */
   2309	while (offset < end) {
   2310		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
   2311					  chunk);
   2312		if (rc)
   2313			goto out;
   2314		offset += chunk;
   2315	}
   2316out:
   2317	return rc;
   2318}
   2319
   2320int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
   2321		       size_t len, size_t *retlen, const u8 *buffer)
   2322{
   2323	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
   2324	struct efx_nic *efx = mtd->priv;
   2325	loff_t offset = start;
   2326	loff_t end = min_t(loff_t, start + len, mtd->size);
   2327	size_t chunk;
   2328	int rc = 0;
   2329
   2330	if (!part->updating) {
   2331		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
   2332		if (rc)
   2333			goto out;
   2334		part->updating = true;
   2335	}
   2336
   2337	while (offset < end) {
   2338		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
   2339		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
   2340					  buffer, chunk);
   2341		if (rc)
   2342			goto out;
   2343		offset += chunk;
   2344		buffer += chunk;
   2345	}
   2346out:
   2347	*retlen = offset - start;
   2348	return rc;
   2349}
   2350
   2351int efx_mcdi_mtd_sync(struct mtd_info *mtd)
   2352{
   2353	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
   2354	struct efx_nic *efx = mtd->priv;
   2355	int rc = 0;
   2356
   2357	if (part->updating) {
   2358		part->updating = false;
   2359		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
   2360	}
   2361
   2362	return rc;
   2363}
   2364
   2365void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
   2366{
   2367	struct efx_mcdi_mtd_partition *mcdi_part =
   2368		container_of(part, struct efx_mcdi_mtd_partition, common);
   2369	struct efx_nic *efx = part->mtd.priv;
   2370
   2371	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
   2372		 efx->name, part->type_name, mcdi_part->fw_subtype);
   2373}
   2374
   2375#endif /* CONFIG_SFC_MTD */