cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

mcdi.c (63669B)


// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2008-2013 Solarflare Communications Inc.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/atomic.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

#define MCDI_RPC_TIMEOUT       (10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 250ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		2500
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

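/* Queued asynchronous MCDI request state.  The structure is allocated
 * with ALIGN(max(inlen, outlen), 4) extra bytes: the trailing buffer
 * first holds the request payload and is later reused for the response
 * (see _efx_mcdi_rpc_async() and efx_mcdi_complete_async()).
 */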
struct efx_mcdi_async_param {
	struct list_head list;
	unsigned int cmd;
	size_t inlen;
	size_t outlen;
	bool quiet;
	efx_mcdi_async_completer *complete;
	unsigned long cookie;
	/* followed by request/response buffer */
};

static void efx_mcdi_timeout_async(struct timer_list *t);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached_out);
static bool efx_mcdi_poll_once(struct efx_nic *efx);
static void efx_mcdi_abandon(struct efx_nic *efx);

#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
static bool efx_siena_mcdi_logging_default;
module_param_named(mcdi_logging_default, efx_siena_mcdi_logging_default,
		   bool, 0644);
MODULE_PARM_DESC(mcdi_logging_default,
		 "Enable MCDI logging on newly-probed functions");
#endif

int efx_siena_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;
	bool already_attached;
	int rc = -ENOMEM;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		goto fail;

	mcdi = efx_mcdi(efx);
	mcdi->efx = efx;
#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
	/* consuming code assumes buffer is page-sized */
	mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
	if (!mcdi->logging_buffer)
		goto fail1;
	mcdi->logging_enabled = efx_siena_mcdi_logging_default;
#endif
	init_waitqueue_head(&mcdi->wq);
	init_waitqueue_head(&mcdi->proxy_rx_wq);
	spin_lock_init(&mcdi->iface_lock);
	mcdi->state = MCDI_STATE_QUIESCENT;
	mcdi->mode = MCDI_MODE_POLL;
	spin_lock_init(&mcdi->async_lock);
	INIT_LIST_HEAD(&mcdi->async_list);
	timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0);

	(void)efx_siena_mcdi_poll_reboot(efx);
	mcdi->new_epoch = true;

	/* Recover from a failed assertion before probing */
	rc = efx_siena_mcdi_handle_assertion(efx);
	if (rc)
		goto fail2;

	/* Let the MC (and BMC, if this is a LOM) know that the driver
	 * is loaded. We should do this before we reset the NIC.
	 */
	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Unable to register driver with MCPU\n");
		goto fail2;
	}
	if (already_attached)
		/* Not a fatal error */
		netif_err(efx, probe, efx->net_dev,
			  "Host already registered with MCPU\n");

	if (efx->mcdi->fn_flags &
	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		efx->primary = efx;

	return 0;
fail2:
#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
	free_page((unsigned long)mcdi->logging_buffer);
fail1:
#endif
	kfree(efx->mcdi);
	efx->mcdi = NULL;
fail:
	return rc;
}

void efx_siena_mcdi_detach(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return;

	BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);

	/* Relinquish the device (back to the BMC, if this is a LOM) */
	efx_mcdi_drv_attach(efx, false, NULL);
}

void efx_siena_mcdi_fini(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return;

#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
	free_page((unsigned long)efx->mcdi->iface.logging_buffer);
#endif

	kfree(efx->mcdi);
}

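/* Write a request out to the MC.  MCDI v1 uses a single header dword
 * carrying the command and length; v2 escapes via MC_CMD_V2_EXTN and
 * carries the real command and length in a second header dword, which
 * allows a larger command space and SDUs up to MCDI_CTL_SDU_LEN_MAX_V2.
 * The caller must already own the interface (state != QUIESCENT).
 */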
static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
				  const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
	char *buf = mcdi->logging_buffer; /* page-sized */
#endif
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	seqno = mcdi->seqno & SEQ_MASK;
	spin_unlock_bh(&mcdi->iface_lock);

	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		hdr_len = 4;
	} else {
		/* MCDI v2 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
		int bytes = 0;
		int i;
		/* Lengths should always be a whole number of dwords, so scream
		 * if they're not.
		 */
		WARN_ON_ONCE(hdr_len % 4);
		WARN_ON_ONCE(inlen % 4);

		/* We own the logging buffer, as only one MCDI can be in
		 * progress on a NIC at any one time.  So no need for locking.
		 */
		for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x",
					   le32_to_cpu(hdr[i].u32[0]));

		for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x",
					   le32_to_cpu(inbuf[i].u32[0]));

		netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
	}
#endif

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);

	mcdi->new_epoch = false;
}

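/* Translate an MCDI firmware error code into a negative Linux errno;
 * codes with no direct equivalent map to -EPROTO.
 */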
static int efx_mcdi_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:
		return 0;
#define TRANSLATE_ERROR(name)					\
	case MC_CMD_ERR_ ## name:				\
		return -name;
	TRANSLATE_ERROR(EPERM);
	TRANSLATE_ERROR(ENOENT);
	TRANSLATE_ERROR(EINTR);
	TRANSLATE_ERROR(EAGAIN);
	TRANSLATE_ERROR(EACCES);
	TRANSLATE_ERROR(EBUSY);
	TRANSLATE_ERROR(EINVAL);
	TRANSLATE_ERROR(EDEADLK);
	TRANSLATE_ERROR(ENOSYS);
	TRANSLATE_ERROR(ETIME);
	TRANSLATE_ERROR(EALREADY);
	TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
	case MC_CMD_ERR_ENOTSUP:
		return -EOPNOTSUPP;
	case MC_CMD_ERR_ALLOC_FAIL:
		return -ENOBUFS;
	case MC_CMD_ERR_MAC_EXIST:
		return -EADDRINUSE;
	default:
		return -EPROTO;
	}
}

static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
	char *buf = mcdi->logging_buffer; /* page-sized */
#endif
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

#ifdef CONFIG_SFC_SIENA_MCDI_LOGGING
	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
		size_t hdr_len, data_len;
		int bytes = 0;
		int i;

		WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
		hdr_len = mcdi->resp_hdr_len / 4;
		/* MCDI_DECLARE_BUF ensures that underlying buffer is padded
		 * to dword size, and the MCDI buffer is always dword size
		 */
		data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);

		/* We own the logging buffer, as only one MCDI can be in
		 * progress on a NIC at any one time.  So no need for locking.
		 */
		for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
			efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x", le32_to_cpu(hdr.u32[0]));
		}

		for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
			efx->type->mcdi_read_response(efx, &hdr,
					mcdi->resp_hdr_len + (i * 4), 4);
			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
					   " %08x", le32_to_cpu(hdr.u32[0]));
		}

		netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
	}
#endif

	mcdi->resprc_raw = 0;
	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
		mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
	} else {
		mcdi->resprc = 0;
	}
}

static bool efx_mcdi_poll_once(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	rmb();
	if (!efx->type->mcdi_poll_response(efx))
		return false;

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	return true;
}

static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_siena_mcdi_poll_reboot(efx);
	if (rc) {
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = USER_TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		if (efx_mcdi_poll_once(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}

/* Test and clear MC-rebooted flag for this port/function; reset
 * software state as necessary.
 */
int efx_siena_mcdi_poll_reboot(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return 0;

	return efx->type->mcdi_poll_reboot(efx);
}

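/* The interface state acts as the lock protecting the shared MCDI
 * doorbell: ownership is taken by moving QUIESCENT to RUNNING_SYNC or
 * RUNNING_ASYNC with cmpxchg(), completion moves RUNNING_* to
 * COMPLETED, and efx_mcdi_release() returns the state to QUIESCENT
 * (or starts the next queued asynchronous request).
 */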
static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
{
	return cmpxchg(&mcdi->state,
		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
		MCDI_STATE_QUIESCENT;
}

static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING_SYNC.
	 */
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
		   MCDI_STATE_QUIESCENT);
}

static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
			       MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_siena_mcdi_mode_poll() switched us back to polled
	 * completions.  In which case, poll for completions directly. If
	 * efx_mcdi_ev_cpl() completed the request first, then we'll just
	 * end up completing the request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_siena_mcdi_mode_poll(),
	 * which wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}

/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
 * requester.  Return whether this was done.  Does not take any locks.
 */
static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
{
	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
	    MCDI_STATE_RUNNING_SYNC) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}

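/* Release the interface.  In event-completion mode, hand it straight
 * to the next queued asynchronous request if there is one; otherwise
 * mark it QUIESCENT and wake any synchronous waiter.
 */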
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	if (mcdi->mode == MCDI_MODE_EVENTS) {
		struct efx_mcdi_async_param *async;
		struct efx_nic *efx = mcdi->efx;

		/* Process the asynchronous request queue */
		spin_lock_bh(&mcdi->async_lock);
		async = list_first_entry_or_null(
			&mcdi->async_list, struct efx_mcdi_async_param, list);
		if (async) {
			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
			efx_mcdi_send_request(efx, async->cmd,
					      (const efx_dword_t *)(async + 1),
					      async->inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
		spin_unlock_bh(&mcdi->async_lock);

		if (async)
			return;
	}

	mcdi->state = MCDI_STATE_QUIESCENT;
	wake_up(&mcdi->wq);
}

/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
 * asynchronous completion function, and release the interface.
 * Return whether this was done.  Must be called in bh-disabled
 * context.  Will take iface_lock and async_lock.
 */
static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
	struct efx_nic *efx = mcdi->efx;
	struct efx_mcdi_async_param *async;
	size_t hdr_len, data_len, err_len;
	efx_dword_t *outbuf;
	MCDI_DECLARE_BUF_ERR(errbuf);
	int rc;

	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
	    MCDI_STATE_RUNNING_ASYNC)
		return false;

	spin_lock(&mcdi->iface_lock);
	if (timeout) {
		/* Ensure that if the completion event arrives later,
		 * the seqno check in efx_mcdi_ev_cpl() will fail
		 */
		++mcdi->seqno;
		++mcdi->credits;
		rc = -ETIMEDOUT;
		hdr_len = 0;
		data_len = 0;
	} else {
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
	}
	spin_unlock(&mcdi->iface_lock);

	/* Stop the timer.  In case the timer function is running, we
	 * must wait for it to return so that there is no possibility
	 * of it aborting the next request.
	 */
	if (!timeout)
		del_timer_sync(&mcdi->async_timer);

	spin_lock(&mcdi->async_lock);
	async = list_first_entry(&mcdi->async_list,
				 struct efx_mcdi_async_param, list);
	list_del(&async->list);
	spin_unlock(&mcdi->async_lock);

	outbuf = (efx_dword_t *)(async + 1);
	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
				      min(async->outlen, data_len));
	if (!timeout && rc && !async->quiet) {
		err_len = min(sizeof(errbuf), data_len);
		efx->type->mcdi_read_response(efx, errbuf, hdr_len,
					      sizeof(errbuf));
		efx_siena_mcdi_display_error(efx, async->cmd, async->inlen,
					     errbuf, err_len, rc);
	}

	if (async->complete)
		async->complete(efx, async->cookie, rc, outbuf,
				min(async->outlen, data_len));
	kfree(async);

	efx_mcdi_release(mcdi);

	return true;
}

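/* Handle a CMDDONE event.  mcdi->credits counts requests that were
 * cancelled after a timeout: a late completion whose sequence number
 * no longer matches consumes a credit silently instead of logging a
 * sequence mismatch.
 */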
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake) {
		if (!efx_mcdi_complete_async(mcdi, false))
			(void) efx_mcdi_complete_sync(mcdi);

		/* If the interface isn't RUNNING_ASYNC or
		 * RUNNING_SYNC then we've received a duplicate
		 * completion after we've already transitioned back to
		 * QUIESCENT. [A subsequent invocation would increment
		 * seqno, so would have failed the seqno check].
		 */
	}
}

static void efx_mcdi_timeout_async(struct timer_list *t)
{
	struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);

	efx_mcdi_complete_async(mcdi, true);
}

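/* Check that the command and payload length can be expressed by the
 * MCDI version bound to this NIC: v1 cannot encode commands above
 * MC_CMD_CMD_SPACE_ESCAPE_7 or SDUs longer than MCDI_CTL_SDU_LEN_MAX_V1.
 */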
static int
efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
{
	if (efx->type->mcdi_max_ver < 0 ||
	     (efx->type->mcdi_max_ver < 2 &&
	      cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	return 0;
}

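/* If the response carries MC_CMD_ERR_PROXY_PENDING, the command is
 * awaiting authorisation by the admin function.  Extract the handle
 * used to match the later PROXY_RESPONSE event against this request.
 */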
static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx,
				      size_t hdr_len, size_t data_len,
				      u32 *proxy_handle)
{
	MCDI_DECLARE_BUF_ERR(testbuf);
	const size_t buflen = sizeof(testbuf);

	if (!proxy_handle || data_len < buflen)
		return false;

	efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen);
	if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) {
		*proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE);
		return true;
	}

	return false;
}

static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
				size_t inlen,
				efx_dword_t *outbuf, size_t outlen,
				size_t *outlen_actual, bool quiet,
				u32 *proxy_handle, int *raw_rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	MCDI_DECLARE_BUF_ERR(errbuf);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);

		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
			netif_err(efx, hw, efx->net_dev,
				  "MCDI request was completed without an event\n");
			rc = 0;
		}

		efx_mcdi_abandon(efx);

		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);
	}

	if (proxy_handle)
		*proxy_handle = 0;

	if (rc != 0) {
		if (outlen_actual)
			*outlen_actual = 0;
	} else {
		size_t hdr_len, data_len, err_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		if (raw_rc)
			*raw_rc = mcdi->resprc_raw;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		err_len = min(sizeof(errbuf), data_len);
		spin_unlock_bh(&mcdi->iface_lock);

		BUG_ON(rc > 0);

		efx->type->mcdi_read_response(efx, outbuf, hdr_len,
					      min(outlen, data_len));
		if (outlen_actual)
			*outlen_actual = data_len;

		efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);

		if (cmd == MC_CMD_REBOOT && rc == -EIO) {
			/* Don't reset if MC_CMD_REBOOT returns EIO */
		} else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC reboot detected\n");
			netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n",
				  cmd, -rc);
			if (efx->type->mcdi_reboot_detected)
				efx->type->mcdi_reboot_detected(efx);
			efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else if (proxy_handle && (rc == -EPROTO) &&
			   efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
						     proxy_handle)) {
			mcdi->proxy_rx_status = 0;
			mcdi->proxy_rx_handle = 0;
			mcdi->state = MCDI_STATE_PROXY_WAIT;
		} else if (rc && !quiet) {
			efx_siena_mcdi_display_error(efx, cmd, inlen, errbuf,
						     err_len, rc);
		}

		if (rc == -EIO || rc == -EINTR) {
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_siena_mcdi_poll_reboot(efx);
			mcdi->new_epoch = true;
		}
	}

	if (!proxy_handle || !*proxy_handle)
		efx_mcdi_release(mcdi);
	return rc;
}

static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi)
{
	if (mcdi->state == MCDI_STATE_PROXY_WAIT) {
		/* Interrupt the proxy wait. */
		mcdi->proxy_rx_status = -EINTR;
		wake_up(&mcdi->proxy_rx_wq);
	}
}

static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
				       u32 handle, int status)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT);

	mcdi->proxy_rx_status = efx_mcdi_errno(status);
	/* Ensure the status is written before we update the handle, since the
	 * latter is used to check if we've finished.
	 */
	wmb();
	mcdi->proxy_rx_handle = handle;
	wake_up(&mcdi->proxy_rx_wq);
}

static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	/* Wait for a proxy event, or timeout. */
	rc = wait_event_timeout(mcdi->proxy_rx_wq,
				mcdi->proxy_rx_handle != 0 ||
				mcdi->proxy_rx_status == -EINTR,
				MCDI_RPC_TIMEOUT);

	if (rc <= 0) {
		netif_dbg(efx, hw, efx->net_dev,
			  "MCDI proxy timeout %d\n", handle);
		return -ETIMEDOUT;
	} else if (mcdi->proxy_rx_handle != handle) {
		netif_warn(efx, hw, efx->net_dev,
			   "MCDI proxy unexpected handle %d (expected %d)\n",
			   mcdi->proxy_rx_handle, handle);
		return -EINVAL;
	}

	return mcdi->proxy_rx_status;
}

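/* Issue a request and wait for it to complete.  If the MC defers the
 * command for proxy authorisation, wait for the PROXY_RESPONSE event
 * and, if authorisation succeeded, retry the original request once.
 */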
static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
			 const efx_dword_t *inbuf, size_t inlen,
			 efx_dword_t *outbuf, size_t outlen,
			 size_t *outlen_actual, bool quiet, int *raw_rc)
{
	u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */
	int rc;

	if (inbuf && inlen && (inbuf == outbuf)) {
		/* The input buffer can't be aliased with the output. */
		WARN_ON(1);
		return -EINVAL;
	}

	rc = efx_siena_mcdi_rpc_start(efx, cmd, inbuf, inlen);
	if (rc)
		return rc;

	rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				  outlen_actual, quiet, &proxy_handle, raw_rc);

	if (proxy_handle) {
		/* Handle proxy authorisation. This allows approval of MCDI
		 * operations to be delegated to the admin function, allowing
		 * fine control over (eg) multicast subscriptions.
		 */
		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

		netif_dbg(efx, hw, efx->net_dev,
			  "MCDI waiting for proxy auth %d\n",
			  proxy_handle);
		rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet);

		if (rc == 0) {
			netif_dbg(efx, hw, efx->net_dev,
				  "MCDI proxy retry %d\n", proxy_handle);

			/* We now retry the original request. */
			mcdi->state = MCDI_STATE_RUNNING_SYNC;
			efx_mcdi_send_request(efx, cmd, inbuf, inlen);

			rc = _efx_mcdi_rpc_finish(efx, cmd, inlen,
						  outbuf, outlen, outlen_actual,
						  quiet, NULL, raw_rc);
		} else {
			netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
				       "MC command 0x%x failed after proxy auth rc=%d\n",
				       cmd, rc);

			if (rc == -EINTR || rc == -EIO)
				efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
			efx_mcdi_release(mcdi);
		}
	}

	return rc;
}

static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd,
				   const efx_dword_t *inbuf, size_t inlen,
				   efx_dword_t *outbuf, size_t outlen,
				   size_t *outlen_actual, bool quiet)
{
	int raw_rc = 0;
	int rc;

	rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
			   outbuf, outlen, outlen_actual, true, &raw_rc);

	if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
	    efx->type->is_vf) {
		/* If the EVB port isn't available within a VF this may
		 * mean the PF is still bringing the switch up. We should
		 * retry our request shortly.
		 */
		unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT;
		unsigned int delay_us = 10000;

		netif_dbg(efx, hw, efx->net_dev,
			  "%s: NO_EVB_PORT; will retry request\n",
			  __func__);

		do {
			usleep_range(delay_us, delay_us + 10000);
			rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
					   outbuf, outlen, outlen_actual,
					   true, &raw_rc);
			if (delay_us < 100000)
				delay_us <<= 1;
		} while ((rc == -EPROTO) &&
			 (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
			 time_before(jiffies, abort_time));
	}

	if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO))
		efx_siena_mcdi_display_error(efx, cmd, inlen,
					     outbuf, outlen, rc);

	return rc;
}

/**
 * efx_siena_mcdi_rpc - Issue an MCDI command and wait for completion
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes.  Must be a multiple
 *	of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
 * @outbuf: Response buffer.  May be %NULL if @outlen is 0.
 * @outlen: Length of response buffer, in bytes.  If the actual
 *	response is longer than @outlen & ~3, it will be truncated
 *	to that length.
 * @outlen_actual: Pointer through which to return the actual response
 *	length.  May be %NULL if this is not needed.
 *
 * This function may sleep and therefore must be called in an appropriate
 * context.
 *
 * Return: A negative error code, or zero if successful.  The error
 *	code may come from the MCDI response or may indicate a failure
 *	to communicate with the MC.  In the former case, the response
 *	will still be copied to @outbuf and *@outlen_actual will be
 *	set accordingly.  In the latter case, *@outlen_actual will be
 *	set to zero.
 */
int efx_siena_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
		       const efx_dword_t *inbuf, size_t inlen,
		       efx_dword_t *outbuf, size_t outlen,
		       size_t *outlen_actual)
{
	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
				       outlen_actual, false);
}

/* Normally, on receiving an error code in the MCDI response,
 * efx_siena_mcdi_rpc will log an error message containing (among other
 * things) the raw error code, by means of efx_siena_mcdi_display_error.
 * This _quiet version suppresses that; if the caller wishes to log
 * the error conditionally on the return code, it should call this
 * function and is then responsible for calling efx_siena_mcdi_display_error
 * as needed.
 */
int efx_siena_mcdi_rpc_quiet(struct efx_nic *efx, unsigned int cmd,
			     const efx_dword_t *inbuf, size_t inlen,
			     efx_dword_t *outbuf, size_t outlen,
			     size_t *outlen_actual)
{
	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
				       outlen_actual, true);
}

int efx_siena_mcdi_rpc_start(struct efx_nic *efx, unsigned int cmd,
			     const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	if (efx->mc_bist_for_other_fn)
		return -ENETDOWN;

	if (mcdi->mode == MCDI_MODE_FAIL)
		return -ENETDOWN;

	efx_mcdi_acquire_sync(mcdi);
	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
	return 0;
}

static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
			       const efx_dword_t *inbuf, size_t inlen,
			       size_t outlen,
			       efx_mcdi_async_completer *complete,
			       unsigned long cookie, bool quiet)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	struct efx_mcdi_async_param *async;
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	if (efx->mc_bist_for_other_fn)
		return -ENETDOWN;

	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
			GFP_ATOMIC);
	if (!async)
		return -ENOMEM;

	async->cmd = cmd;
	async->inlen = inlen;
	async->outlen = outlen;
	async->quiet = quiet;
	async->complete = complete;
	async->cookie = cookie;
	memcpy(async + 1, inbuf, inlen);

	spin_lock_bh(&mcdi->async_lock);

	if (mcdi->mode == MCDI_MODE_EVENTS) {
		list_add_tail(&async->list, &mcdi->async_list);

		/* If this is at the front of the queue, try to start it
		 * immediately
		 */
		if (mcdi->async_list.next == &async->list &&
		    efx_mcdi_acquire_async(mcdi)) {
			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
	} else {
		kfree(async);
		rc = -ENETDOWN;
	}

	spin_unlock_bh(&mcdi->async_lock);

	return rc;
}

/**
 * efx_siena_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes
 * @outlen: Length to allocate for response buffer, in bytes
 * @complete: Function to be called on completion or cancellation.
 * @cookie: Arbitrary value to be passed to @complete.
 *
 * This function does not sleep and therefore may be called in atomic
 * context.  It will fail if event queues are disabled or if MCDI
 * event completions have been disabled due to an error.
 *
 * If it succeeds, the @complete function will be called exactly once
 * in atomic context, when one of the following occurs:
 * (a) the completion event is received (in NAPI context)
 * (b) event queues are disabled (in the process that disables them)
 * (c) the request times out (in timer context)
 */
int
efx_siena_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
			 const efx_dword_t *inbuf, size_t inlen, size_t outlen,
			 efx_mcdi_async_completer *complete,
			 unsigned long cookie)
{
	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
				   cookie, false);
}

int efx_siena_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
				   const efx_dword_t *inbuf, size_t inlen,
				   size_t outlen,
				   efx_mcdi_async_completer *complete,
				   unsigned long cookie)
{
	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
				   cookie, true);
}

int efx_siena_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
			      size_t inlen, efx_dword_t *outbuf, size_t outlen,
			      size_t *outlen_actual)
{
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, false, NULL, NULL);
}

int efx_siena_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned int cmd,
				    size_t inlen, efx_dword_t *outbuf,
				    size_t outlen, size_t *outlen_actual)
{
	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
				    outlen_actual, true, NULL, NULL);
}

void efx_siena_mcdi_display_error(struct efx_nic *efx, unsigned int cmd,
				  size_t inlen, efx_dword_t *outbuf,
				  size_t outlen, int rc)
{
	int code = 0, err_arg = 0;

	if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
		code = MCDI_DWORD(outbuf, ERR_CODE);
	if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
		err_arg = MCDI_DWORD(outbuf, ERR_ARG);
	netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
		       "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
		       cmd, inlen, rc, code, err_arg);
}

/* Switch to polled MCDI completions.  This can be called in various
 * error conditions with various locks held, so it must be lockless.
 * Caller is responsible for flushing asynchronous requests later.
 */
void efx_siena_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	/* If already in polling mode, nothing to do.
	 * If in fail-fast state, don't switch to polled completion.
	 * FLR recovery will do that later.
	 */
	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete_sync() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete_sync(mcdi);
}

/* Flush any running or queued asynchronous requests, after event processing
 * is stopped
 */
void efx_siena_mcdi_flush_async(struct efx_nic *efx)
{
	struct efx_mcdi_async_param *async, *next;
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	/* We must be in poll or fail mode so no more requests can be queued */
	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);

	del_timer_sync(&mcdi->async_timer);

	/* If a request is still running, make sure we give the MC
	 * time to complete it so that the response won't overwrite our
	 * next request.
	 */
	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
		efx_mcdi_poll(efx);
		mcdi->state = MCDI_STATE_QUIESCENT;
	}

	/* Nothing else will access the async list now, so it is safe
	 * to walk it without holding async_lock.  If we hold it while
	 * calling a completer then lockdep may warn that we have
	 * acquired locks in the wrong order.
	 */
	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
		if (async->complete)
			async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
		list_del(&async->list);
		kfree(async);
	}
}

void efx_siena_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	/* If already in event completion mode, nothing to do.
	 * If in fail-fast state, don't switch to event completion.  FLR
	 * recovery will do that later.
	 */
	if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_siena_mcdi_rpc() sees it,
	 * which efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire_sync(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * If there is an outstanding asynchronous request, we can't
	 * complete it now (efx_mcdi_complete_async() would deadlock).  The
	 * reset process will take care of this.
	 *
	 * There's a race here with efx_mcdi_send_request(), because
	 * we might receive a REBOOT event *before* the request has
	 * been copied out. In polled mode (during startup) this is
	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
	 * event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI
	 * request. Did the mc reboot before or after the copyout? The
	 * best we can do is always just return failure.
	 *
	 * If there is an outstanding proxy response expected it is not going
	 * to arrive. We should thus abort it.
	 */
	spin_lock(&mcdi->iface_lock);
	efx_mcdi_proxy_abort(mcdi);

	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Consume the status word since efx_siena_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			rc = efx_siena_mcdi_poll_reboot(efx);
			if (rc)
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}

		/* On EF10, a CODE_MC_REBOOT event can be received without the
		 * reboot detection in efx_siena_mcdi_poll_reboot() being triggered.
		 * If zero was returned from the final call to
		 * efx_siena_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
		 * MC has definitely rebooted so prepare for the reset.
		 */
		if (!rc && efx->type->mcdi_reboot_detected)
			efx->type->mcdi_reboot_detected(efx);

		mcdi->new_epoch = true;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_siena_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
	}

	spin_unlock(&mcdi->iface_lock);
}

/* The MC is going down into BIST mode.  Set the BIST flag to block
 * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
 * (which doesn't actually execute a reset, it waits for the controlling
 * function to reset it).
 */
static void efx_mcdi_ev_bist(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	spin_lock(&mcdi->iface_lock);
	efx->mc_bist_for_other_fn = true;
	efx_mcdi_proxy_abort(mcdi);

	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = -EIO;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	}
	mcdi->new_epoch = true;
	efx_siena_schedule_reset(efx, RESET_TYPE_MC_BIST);
	spin_unlock(&mcdi->iface_lock);
}

/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
 * to recover.
 */
static void efx_mcdi_abandon(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
		return; /* it had already been done */
	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
	efx_siena_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
}

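/* Each TX/RX flush-done event decrements active_queues; the last one
 * wakes whoever is waiting on flush_wq for all flushes to finish.
 */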
static void efx_handle_drain_event(struct efx_nic *efx)
{
	if (atomic_dec_and_test(&efx->active_queues))
		wake_up(&efx->flush_wq);

	WARN_ON(atomic_read(&efx->active_queues) < 0);
}

/* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
void efx_siena_mcdi_process_event(struct efx_channel *channel,
				  efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_siena_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_dbg(efx, hw, efx->net_dev,
			  "MC Scheduler alert (0x%x)\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
	case MCDI_EVENT_CODE_MC_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MC_BIST:
		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
		efx_mcdi_ev_bist(efx);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily.  We can ignore this. */
   1360		break;
   1361	case MCDI_EVENT_CODE_FLR:
   1362		if (efx->type->sriov_flr)
   1363			efx->type->sriov_flr(efx,
   1364					     MCDI_EVENT_FIELD(*event, FLR_VF));
   1365		break;
   1366	case MCDI_EVENT_CODE_PTP_RX:
   1367	case MCDI_EVENT_CODE_PTP_FAULT:
   1368	case MCDI_EVENT_CODE_PTP_PPS:
   1369		efx_siena_ptp_event(efx, event);
   1370		break;
   1371	case MCDI_EVENT_CODE_PTP_TIME:
   1372		efx_siena_time_sync_event(channel, event);
   1373		break;
   1374	case MCDI_EVENT_CODE_TX_FLUSH:
   1375	case MCDI_EVENT_CODE_RX_FLUSH:
   1376		/* Two flush events will be sent: one to the same event
   1377		 * queue as completions, and one to event queue 0.
   1378		 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
   1379		 * flag will be set, and we should ignore the event
   1380		 * because we want to wait for all completions.
   1381		 */
   1382		BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
   1383			     MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
   1384		if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
   1385			efx_handle_drain_event(efx);
   1386		break;
   1387	case MCDI_EVENT_CODE_TX_ERR:
   1388	case MCDI_EVENT_CODE_RX_ERR:
   1389		netif_err(efx, hw, efx->net_dev,
   1390			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
   1391			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
   1392			  EFX_QWORD_VAL(*event));
   1393		efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
   1394		break;
   1395	case MCDI_EVENT_CODE_PROXY_RESPONSE:
   1396		efx_mcdi_ev_proxy_response(efx,
   1397				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
   1398				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
   1399		break;
   1400	default:
   1401		netif_err(efx, hw, efx->net_dev,
   1402			  "Unknown MCDI event " EFX_QWORD_FMT "\n",
   1403			  EFX_QWORD_VAL(*event));
   1404	}
   1405}
   1406
   1407/**************************************************************************
   1408 *
   1409 * Specific request functions
   1410 *
   1411 **************************************************************************
   1412 */
   1413
   1414void efx_siena_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
   1415{
   1416	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
   1417	size_t outlength;
   1418	const __le16 *ver_words;
   1419	size_t offset;
   1420	int rc;
   1421
   1422	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
   1423	rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
   1424				outbuf, sizeof(outbuf), &outlength);
   1425	if (rc)
   1426		goto fail;
   1427	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
   1428		rc = -EIO;
   1429		goto fail;
   1430	}
   1431
   1432	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
   1433	offset = scnprintf(buf, len, "%u.%u.%u.%u",
   1434			   le16_to_cpu(ver_words[0]),
   1435			   le16_to_cpu(ver_words[1]),
   1436			   le16_to_cpu(ver_words[2]),
   1437			   le16_to_cpu(ver_words[3]));
   1438
   1439	if (efx->type->print_additional_fwver)
   1440		offset += efx->type->print_additional_fwver(efx, buf + offset,
   1441							    len - offset);
   1442
   1443	/* It's theoretically possible for the string to exceed 31
   1444	 * characters, though in practice the first three version
   1445	 * components are short enough that this doesn't happen.
   1446	 */
   1447	if (WARN_ON(offset >= len))
   1448		buf[0] = 0;
   1449
   1450	return;
   1451
   1452fail:
   1453	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   1454	buf[0] = 0;
   1455}
   1456
   1457static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
   1458			       bool *was_attached)
   1459{
   1460	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
   1461	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
   1462	size_t outlen;
   1463	int rc;
   1464
   1465	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
   1466		       driver_operating ? 1 : 0);
   1467	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
   1468	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
   1469
   1470	rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
   1471				      sizeof(inbuf), outbuf, sizeof(outbuf),
   1472				      &outlen);
   1473	/* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
   1474	 * specified will fail with EPERM, and we have to tell the MC we don't
   1475	 * care what firmware we get.
   1476	 */
   1477	if (rc == -EPERM) {
   1478		netif_dbg(efx, probe, efx->net_dev,
   1479			  "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
   1480		MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
   1481			       MC_CMD_FW_DONT_CARE);
   1482		rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
   1483					      sizeof(inbuf), outbuf,
   1484					      sizeof(outbuf), &outlen);
   1485	}
   1486	if (rc) {
   1487		efx_siena_mcdi_display_error(efx, MC_CMD_DRV_ATTACH,
   1488					     sizeof(inbuf), outbuf, outlen, rc);
   1489		goto fail;
   1490	}
   1491	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
   1492		rc = -EIO;
   1493		goto fail;
   1494	}
   1495
   1496	if (driver_operating) {
   1497		if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
   1498			efx->mcdi->fn_flags =
   1499				MCDI_DWORD(outbuf,
   1500					   DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
   1501		} else {
   1502			/* Synthesise flags for Siena */
   1503			efx->mcdi->fn_flags =
   1504				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
   1505				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
   1506				(efx_port_num(efx) == 0) <<
   1507				MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
   1508		}
   1509	}
   1510
   1511	/* We currently assume we have control of the external link
   1512	 * and are completely trusted by firmware.  Abort probing
   1513	 * if that's not true for this function.
   1514	 */
   1515
   1516	if (was_attached != NULL)
   1517		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
   1518	return 0;
   1519
   1520fail:
   1521	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   1522	return rc;
   1523}
   1524
   1525int efx_siena_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
   1526				 u16 *fw_subtype_list, u32 *capabilities)
   1527{
   1528	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
   1529	size_t outlen, i;
   1530	int port_num = efx_port_num(efx);
   1531	int rc;
   1532
   1533	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
   1534	/* we need __aligned(2) for ether_addr_copy */
   1535	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
   1536	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
   1537
   1538	rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
   1539				outbuf, sizeof(outbuf), &outlen);
   1540	if (rc)
   1541		goto fail;
   1542
   1543	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
   1544		rc = -EIO;
   1545		goto fail;
   1546	}
   1547
   1548	if (mac_address)
   1549		ether_addr_copy(mac_address,
   1550				port_num ?
   1551				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
   1552				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
   1553	if (fw_subtype_list) {
   1554		for (i = 0;
   1555		     i < MCDI_VAR_ARRAY_LEN(outlen,
   1556					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
   1557		     i++)
   1558			fw_subtype_list[i] = MCDI_ARRAY_WORD(
   1559				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
   1560		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
   1561			fw_subtype_list[i] = 0;
   1562	}
   1563	if (capabilities) {
   1564		if (port_num)
   1565			*capabilities = MCDI_DWORD(outbuf,
   1566					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
   1567		else
   1568			*capabilities = MCDI_DWORD(outbuf,
   1569					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
   1570	}
   1571
   1572	return 0;
   1573
   1574fail:
   1575	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
   1576		  __func__, rc, (int)outlen);
   1577
   1578	return rc;
   1579}
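
/* Illustrative sketch, not part of the driver: a caller that only wants
 * the MAC address may pass NULL for the other outputs, e.g.
 *
 *	u8 mac[ETH_ALEN];
 *
 *	if (!efx_siena_mcdi_get_board_cfg(efx, mac, NULL, NULL))
 *		eth_hw_addr_set(efx->net_dev, mac);
 */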
   1580
   1581int efx_siena_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
   1582			    u32 dest_evq)
   1583{
   1584	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
   1585	u32 dest = 0;
   1586	int rc;
   1587
   1588	if (uart)
   1589		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
   1590	if (evq)
   1591		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
   1592
   1593	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
   1594	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
   1595
   1596	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
   1597
   1598	rc = efx_siena_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
   1599				NULL, 0, NULL);
   1600	return rc;
   1601}
   1602
   1603int efx_siena_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
   1604{
   1605	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
   1606	size_t outlen;
   1607	int rc;
   1608
   1609	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
   1610
   1611	rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
   1612				outbuf, sizeof(outbuf), &outlen);
   1613	if (rc)
   1614		goto fail;
   1615	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
   1616		rc = -EIO;
   1617		goto fail;
   1618	}
   1619
   1620	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
   1621	return 0;
   1622
   1623fail:
   1624	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
   1625		  __func__, rc);
   1626	return rc;
   1627}
   1628
   1629int efx_siena_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
   1630			      size_t *size_out, size_t *erase_size_out,
   1631			      bool *protected_out)
   1632{
   1633	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
   1634	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
   1635	size_t outlen;
   1636	int rc;
   1637
   1638	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
   1639
   1640	rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
   1641				outbuf, sizeof(outbuf), &outlen);
   1642	if (rc)
   1643		goto fail;
   1644	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
   1645		rc = -EIO;
   1646		goto fail;
   1647	}
   1648
   1649	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
   1650	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
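	/* FLAGS is a bitmask; collapse the PROTECTED bit to a bool */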
   1651	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
   1652				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
   1653	return 0;
   1654
   1655fail:
   1656	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   1657	return rc;
   1658}
   1659
   1660static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
   1661{
   1662	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
   1663	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
   1664	int rc;
   1665
   1666	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
   1667
   1668	rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
   1669				outbuf, sizeof(outbuf), NULL);
   1670	if (rc)
   1671		return rc;
   1672
   1673	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
   1674	case MC_CMD_NVRAM_TEST_PASS:
   1675	case MC_CMD_NVRAM_TEST_NOTSUPP:
   1676		return 0;
   1677	default:
   1678		return -EIO;
   1679	}
   1680}
   1681
   1682int efx_siena_mcdi_nvram_test_all(struct efx_nic *efx)
   1683{
   1684	u32 nvram_types;
   1685	unsigned int type;
   1686	int rc;
   1687
   1688	rc = efx_siena_mcdi_nvram_types(efx, &nvram_types);
   1689	if (rc)
   1690		goto fail1;
   1691
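	/* Walk the type bitmask from the LSB up: e.g. a mask of 0x5
	 * tests NVRAM types 0 and 2 and skips type 1.
	 */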
   1692	type = 0;
   1693	while (nvram_types != 0) {
   1694		if (nvram_types & 1) {
   1695			rc = efx_mcdi_nvram_test(efx, type);
   1696			if (rc)
   1697				goto fail2;
   1698		}
   1699		type++;
   1700		nvram_types >>= 1;
   1701	}
   1702
   1703	return 0;
   1704
   1705fail2:
   1706	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
   1707		  __func__, type);
   1708fail1:
   1709	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   1710	return rc;
   1711}
   1712
   1713/* Returns 1 if an assertion was read, 0 if no assertion had fired,
   1714 * negative on error.
   1715 */
   1716static int efx_mcdi_read_assertion(struct efx_nic *efx)
   1717{
   1718	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
   1719	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
   1720	unsigned int flags, index;
   1721	const char *reason;
   1722	size_t outlen;
   1723	int retry;
   1724	int rc;
   1725
	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice: once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR, and again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port.
	 */
   1731	retry = 2;
   1732	do {
   1733		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
   1734		rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
   1735					      inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
   1736					      outbuf, sizeof(outbuf), &outlen);
   1737		if (rc == -EPERM)
   1738			return 0;
   1739	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
   1740
   1741	if (rc) {
   1742		efx_siena_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
   1743					     MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
   1744					     outlen, rc);
   1745		return rc;
   1746	}
   1747	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
   1748		return -EIO;
   1749
   1750	/* Print out any recorded assertion state */
   1751	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
   1752	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
   1753		return 0;
   1754
   1755	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
   1756		? "system-level assertion"
   1757		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
   1758		? "thread-level assertion"
   1759		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
   1760		? "watchdog reset"
   1761		: "unknown assertion";
   1762	netif_err(efx, hw, efx->net_dev,
   1763		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
   1764		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
   1765		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
   1766
   1767	/* Print out the registers */
   1768	for (index = 0;
   1769	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
   1770	     index++)
   1771		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
   1772			  1 + index,
   1773			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
   1774					   index));
   1775
   1776	return 1;
   1777}
   1778
   1779static int efx_mcdi_exit_assertion(struct efx_nic *efx)
   1780{
   1781	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
   1782	int rc;
   1783
   1784	/* If the MC is running debug firmware, it might now be
   1785	 * waiting for a debugger to attach, but we just want it to
   1786	 * reboot.  We set a flag that makes the command a no-op if it
   1787	 * has already done so.
   1788	 * The MCDI will thus return either 0 or -EIO.
   1789	 */
   1790	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
   1791	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
   1792		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
   1793	rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf,
   1794				      MC_CMD_REBOOT_IN_LEN, NULL, 0, NULL);
   1795	if (rc == -EIO)
   1796		rc = 0;
   1797	if (rc)
   1798		efx_siena_mcdi_display_error(efx, MC_CMD_REBOOT,
   1799					     MC_CMD_REBOOT_IN_LEN, NULL, 0, rc);
   1800	return rc;
   1801}
   1802
   1803int efx_siena_mcdi_handle_assertion(struct efx_nic *efx)
   1804{
   1805	int rc;
   1806
   1807	rc = efx_mcdi_read_assertion(efx);
   1808	if (rc <= 0)
   1809		return rc;
   1810
   1811	return efx_mcdi_exit_assertion(efx);
   1812}
   1813
   1814int efx_siena_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
   1815{
   1816	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
   1817
   1818	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
   1819	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
   1820	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
   1821
   1822	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
   1823
   1824	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
   1825
   1826	return efx_siena_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
   1827				  NULL, 0, NULL);
   1828}
   1829
   1830static int efx_mcdi_reset_func(struct efx_nic *efx)
   1831{
   1832	MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
   1833	int rc;
   1834
   1835	BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
   1836	MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
   1837			      ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
   1838	rc = efx_siena_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
   1839				NULL, 0, NULL);
   1840	return rc;
   1841}
   1842
   1843static int efx_mcdi_reset_mc(struct efx_nic *efx)
   1844{
   1845	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
   1846	int rc;
   1847
   1848	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
   1849	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
   1850	rc = efx_siena_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
   1851				NULL, 0, NULL);
	/* White is black, and up is down: a successful REBOOT kills the
	 * MC mid-command, so the RPC is expected to fail with -EIO, while
	 * a zero return means the MC never actually rebooted.
	 */
   1853	if (rc == -EIO)
   1854		return 0;
   1855	if (rc == 0)
   1856		rc = -EIO;
   1857	return rc;
   1858}
   1859
   1860enum reset_type efx_siena_mcdi_map_reset_reason(enum reset_type reason)
   1861{
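	/* The reason is not consulted: every reason maps to a
	 * recover-or-all reset.
	 */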
   1862	return RESET_TYPE_RECOVER_OR_ALL;
   1863}
   1864
   1865int efx_siena_mcdi_reset(struct efx_nic *efx, enum reset_type method)
   1866{
   1867	int rc;
   1868
   1869	/* If MCDI is down, we can't handle_assertion */
   1870	if (method == RESET_TYPE_MCDI_TIMEOUT) {
   1871		rc = pci_reset_function(efx->pci_dev);
   1872		if (rc)
   1873			return rc;
   1874		/* Re-enable polled MCDI completion */
   1875		if (efx->mcdi) {
   1876			struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
   1877			mcdi->mode = MCDI_MODE_POLL;
   1878		}
   1879		return 0;
   1880	}
   1881
   1882	/* Recover from a failed assertion pre-reset */
   1883	rc = efx_siena_mcdi_handle_assertion(efx);
   1884	if (rc)
   1885		return rc;
   1886
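	/* A datapath reset needs no MC action; a world reset reboots the
	 * MC itself; anything else resets only this function's resources.
	 */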
   1887	if (method == RESET_TYPE_DATAPATH)
   1888		return 0;
   1889	else if (method == RESET_TYPE_WORLD)
   1890		return efx_mcdi_reset_mc(efx);
   1891	else
   1892		return efx_mcdi_reset_func(efx);
   1893}
   1894
   1895static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
   1896				   const u8 *mac, int *id_out)
   1897{
   1898	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
   1899	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
   1900	size_t outlen;
   1901	int rc;
   1902
   1903	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
   1904	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
   1905		       MC_CMD_FILTER_MODE_SIMPLE);
   1906	ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
   1907
   1908	rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf,
   1909				sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
   1910	if (rc)
   1911		goto fail;
   1912
   1913	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
   1914		rc = -EIO;
   1915		goto fail;
   1916	}
   1917
   1918	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
   1919
   1920	return 0;
   1921
   1922fail:
   1923	*id_out = -1;
   1924	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   1925	return rc;
   1927}
   1928
   1930int efx_siena_mcdi_wol_filter_set_magic(struct efx_nic *efx,  const u8 *mac,
   1931					int *id_out)
   1932{
   1933	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
   1934}
   1935
   1937int efx_siena_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
   1938{
   1939	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
   1940	size_t outlen;
   1941	int rc;
   1942
   1943	rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
   1944				outbuf, sizeof(outbuf), &outlen);
   1945	if (rc)
   1946		goto fail;
   1947
   1948	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
   1949		rc = -EIO;
   1950		goto fail;
   1951	}
   1952
   1953	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
   1954
   1955	return 0;
   1956
   1957fail:
   1958	*id_out = -1;
   1959	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
   1960	return rc;
   1961}
   1962
   1964int efx_siena_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
   1965{
   1966	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
   1967	int rc;
   1968
   1969	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
   1970
   1971	rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf,
   1972				sizeof(inbuf), NULL, 0, NULL);
   1973	return rc;
   1974}
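
/* Illustrative sketch, not part of the driver: a suspend path might arm
 * a magic-packet filter and tear it down again on resume, e.g.
 *
 *	int id;
 *
 *	if (!efx_siena_mcdi_wol_filter_set_magic(efx, net_dev->dev_addr, &id)) {
 *		... enter the low-power state, then on resume ...
 *		efx_siena_mcdi_wol_filter_remove(efx, id);
 *	}
 */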
   1975
   1976int efx_siena_mcdi_flush_rxqs(struct efx_nic *efx)
   1977{
   1978	struct efx_channel *channel;
   1979	struct efx_rx_queue *rx_queue;
   1980	MCDI_DECLARE_BUF(inbuf,
   1981			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
   1982	int rc, count;
   1983
   1984	BUILD_BUG_ON(EFX_MAX_CHANNELS >
   1985		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
   1986
   1987	count = 0;
   1988	efx_for_each_channel(channel, efx) {
   1989		efx_for_each_channel_rx_queue(rx_queue, channel) {
   1990			if (rx_queue->flush_pending) {
   1991				rx_queue->flush_pending = false;
   1992				atomic_dec(&efx->rxq_flush_pending);
   1993				MCDI_SET_ARRAY_DWORD(
   1994					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
   1995					count, efx_rx_queue_index(rx_queue));
   1996				count++;
   1997			}
   1998		}
   1999	}
   2000
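	/* Send only the 'count' QIDs actually populated: the request
	 * length is computed from count rather than sizeof(inbuf).
	 */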
   2001	rc = efx_siena_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
   2002				MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count),
   2003				NULL, 0, NULL);
   2004	WARN_ON(rc < 0);
   2005
   2006	return rc;
   2007}
   2008
   2009int efx_siena_mcdi_wol_filter_reset(struct efx_nic *efx)
   2010{
   2011	int rc;
   2012
   2013	rc = efx_siena_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0,
   2014				NULL, 0, NULL);
   2015	return rc;
   2016}
   2017
   2018#ifdef CONFIG_SFC_SIENA_MTD
   2019
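/* MTD reads and writes below are chunked to this size so that each
 * transfer fits comfortably in a single MCDI request.
 */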
   2020#define EFX_MCDI_NVRAM_LEN_MAX 128
   2021
   2022static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
   2023{
   2024	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
   2025	int rc;
   2026
   2027	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
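	/* As in efx_mcdi_nvram_update_finish() below, ask firmware to
	 * report a verify result; old firmware ignores the flag.
	 */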
   2028	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS,
   2029			      NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT,
   2030			      1);
   2031
   2032	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
   2033
   2034	rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf,
   2035				sizeof(inbuf), NULL, 0, NULL);
   2036
   2037	return rc;
   2038}
   2039
   2040static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
   2041			       loff_t offset, u8 *buffer, size_t length)
   2042{
   2043	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);
   2044	MCDI_DECLARE_BUF(outbuf,
   2045			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
   2046	size_t outlen;
   2047	int rc;
   2048
   2049	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
   2050	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
   2051	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
   2052	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
   2053		       MC_CMD_NVRAM_READ_IN_V2_DEFAULT);
   2054
   2055	rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
   2056				outbuf, sizeof(outbuf), &outlen);
   2057	if (rc)
   2058		return rc;
   2059
   2060	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
   2061	return 0;
   2062}
   2063
   2064static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
   2065				loff_t offset, const u8 *buffer, size_t length)
   2066{
   2067	MCDI_DECLARE_BUF(inbuf,
   2068			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
   2069	int rc;
   2070
   2071	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
   2072	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
   2073	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
   2074	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
   2075
   2076	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
   2077
   2078	rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
   2079				ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
   2080				NULL, 0, NULL);
   2081	return rc;
   2082}
   2083
   2084static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
   2085				loff_t offset, size_t length)
   2086{
   2087	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
   2088	int rc;
   2089
   2090	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
   2091	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
   2092	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
   2093
   2094	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
   2095
   2096	rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
   2097				NULL, 0, NULL);
   2098	return rc;
   2099}
   2100
   2101static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
   2102{
   2103	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
   2104	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
   2105	size_t outlen;
   2106	int rc, rc2;
   2107
   2108	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
	/* Always set this flag; old firmware ignores it. */
   2110	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
   2111			      NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
   2112			      1);
   2113
   2114	rc = efx_siena_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf,
   2115				sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
   2116	if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
   2117		rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
   2118		if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
   2119			netif_err(efx, drv, efx->net_dev,
   2120				  "NVRAM update failed verification with code 0x%x\n",
   2121				  rc2);
   2122		switch (rc2) {
   2123		case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
   2124			break;
   2125		case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
   2126		case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
   2127		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
   2128		case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED:
   2129		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED:
   2130			rc = -EIO;
   2131			break;
   2132		case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
   2133		case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
   2134			rc = -EINVAL;
   2135			break;
   2136		case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
   2137		case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
   2138		case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
   2139			rc = -EPERM;
   2140			break;
   2141		default:
   2142			netif_err(efx, drv, efx->net_dev,
   2143				  "Unknown response to NVRAM_UPDATE_FINISH\n");
   2144			rc = -EIO;
   2145		}
   2146	}
   2147
   2148	return rc;
   2149}
   2150
   2151int efx_siena_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
   2152			    size_t len, size_t *retlen, u8 *buffer)
   2153{
   2154	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
   2155	struct efx_nic *efx = mtd->priv;
   2156	loff_t offset = start;
   2157	loff_t end = min_t(loff_t, start + len, mtd->size);
   2158	size_t chunk;
   2159	int rc = 0;
   2160
   2161	while (offset < end) {
   2162		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
   2163		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
   2164					 buffer, chunk);
   2165		if (rc)
   2166			goto out;
   2167		offset += chunk;
   2168		buffer += chunk;
   2169	}
   2170out:
   2171	*retlen = offset - start;
   2172	return rc;
   2173}
   2174
   2175int efx_siena_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
   2176{
   2177	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
   2178	struct efx_nic *efx = mtd->priv;
   2179	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
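	/* (the mask assumes a power-of-two erasesize: e.g. with
	 * erasesize 0x1000, a start of 0x1234 rounds down to 0x1000)
	 */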
   2180	loff_t end = min_t(loff_t, start + len, mtd->size);
   2181	size_t chunk = part->common.mtd.erasesize;
   2182	int rc = 0;
   2183
   2184	if (!part->updating) {
   2185		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
   2186		if (rc)
   2187			goto out;
   2188		part->updating = true;
   2189	}
   2190
	/* The MCDI interface can in fact do multiple erase blocks at once,
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout.
	 */
   2194	while (offset < end) {
   2195		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
   2196					  chunk);
   2197		if (rc)
   2198			goto out;
   2199		offset += chunk;
   2200	}
   2201out:
   2202	return rc;
   2203}
   2204
   2205int efx_siena_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
   2206			     size_t len, size_t *retlen, const u8 *buffer)
   2207{
   2208	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
   2209	struct efx_nic *efx = mtd->priv;
   2210	loff_t offset = start;
   2211	loff_t end = min_t(loff_t, start + len, mtd->size);
   2212	size_t chunk;
   2213	int rc = 0;
   2214
   2215	if (!part->updating) {
   2216		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
   2217		if (rc)
   2218			goto out;
   2219		part->updating = true;
   2220	}
   2221
   2222	while (offset < end) {
   2223		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
   2224		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
   2225					  buffer, chunk);
   2226		if (rc)
   2227			goto out;
   2228		offset += chunk;
   2229		buffer += chunk;
   2230	}
   2231out:
   2232	*retlen = offset - start;
   2233	return rc;
   2234}
   2235
   2236int efx_siena_mcdi_mtd_sync(struct mtd_info *mtd)
   2237{
   2238	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
   2239	struct efx_nic *efx = mtd->priv;
   2240	int rc = 0;
   2241
   2242	if (part->updating) {
   2243		part->updating = false;
   2244		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
   2245	}
   2246
   2247	return rc;
   2248}
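
/* Illustrative sketch, not part of the driver: a full reflash through
 * these MTD ops is erase + write + sync, e.g.
 *
 *	size_t retlen;
 *
 *	efx_siena_mcdi_mtd_erase(mtd, 0, mtd->size);
 *	efx_siena_mcdi_mtd_write(mtd, 0, len, &retlen, image);
 *	efx_siena_mcdi_mtd_sync(mtd);
 *
 * erase and write open the NVRAM update transaction on first use, and
 * sync closes it via NVRAM_UPDATE_FINISH.
 */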
   2249
   2250void efx_siena_mcdi_mtd_rename(struct efx_mtd_partition *part)
   2251{
   2252	struct efx_mcdi_mtd_partition *mcdi_part =
   2253		container_of(part, struct efx_mcdi_mtd_partition, common);
   2254	struct efx_nic *efx = part->mtd.priv;
   2255
   2256	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
   2257		 efx->name, part->type_name, mcdi_part->fw_subtype);
   2258}
   2259
   2260#endif /* CONFIG_SFC_SIENA_MTD */