cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

selftest.c (22581B)


// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2012 Solarflare Communications Inc.
 */

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel_stat.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include "net_driver.h"
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "nic.h"
#include "mcdi_port_common.h"
#include "selftest.h"
#include "workarounds.h"

/* IRQ latency can be enormous because:
 * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
 *   slow serial console or an old IDE driver doing error recovery
 * - The PREEMPT_RT patches mostly deal with this, but also allow a
 *   tasklet or normal task to be given higher priority than our IRQ
 *   threads
 * Try to avoid blaming the hardware for this.
 */
#define IRQ_TIMEOUT HZ

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct efx_loopback_payload {
	struct ethhdr header;
	struct iphdr ip;
	struct udphdr udp;
	__be16 iteration;
	char msg[64];
} __packed;

/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names */
static const unsigned int efx_siena_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *const efx_siena_interrupt_mode_names[] = {
	[EFX_INT_MODE_MSIX]   = "MSI-X",
	[EFX_INT_MODE_MSI]    = "MSI",
	[EFX_INT_MODE_LEGACY] = "legacy",
};
#define INT_MODE(efx) \
	STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_siena_interrupt_mode)

/**
 * struct efx_loopback_state - persistent state during a loopback selftest
 * @flush:		Drop all packets in efx_siena_loopback_rx_packet
 * @packet_count:	Number of packets being used in this test
 * @skbs:		An array of skbs transmitted
 * @offload_csum:	Checksums are being offloaded
 * @rx_good:		RX good packet count
 * @rx_bad:		RX bad packet count
 * @payload:		Payload used in tests
 */
struct efx_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct efx_loopback_payload payload;
};

/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000

/**************************************************************************
 *
 * MII, NVRAM and register tests
 *
 **************************************************************************/

static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	rc = efx_siena_mcdi_phy_test_alive(efx);
	tests->phy_alive = rc ? -1 : 1;

	return rc;
}

static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
{
	int rc = 0;

	if (efx->type->test_nvram) {
		rc = efx->type->test_nvram(efx);
		if (rc == -EPERM)
			rc = 0;
		else
			tests->nvram = rc ? -1 : 1;
	}

	return rc;
}

/**************************************************************************
 *
 * Interrupt and event queue testing
 *
 **************************************************************************/

/* Test generation and receipt of interrupts */
static int efx_test_interrupts(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	unsigned long timeout, wait;
	int cpu;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
	tests->interrupt = -1;

	rc = efx_siena_irq_test_start(efx);
	if (rc == -ENOTSUPP) {
		netif_dbg(efx, drv, efx->net_dev,
			  "direct interrupt testing not supported\n");
		tests->interrupt = 0;
		return 0;
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of test interrupt. */
	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
	do {
		schedule_timeout_uninterruptible(wait);
		cpu = efx_nic_irq_test_irq_cpu(efx);
		if (cpu >= 0)
			goto success;
		wait *= 2;
	} while (time_before(jiffies, timeout));

	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

 success:
	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
		  INT_MODE(efx), cpu);
	tests->interrupt = 1;
	return 0;
}

/* Test generation and receipt of interrupting events */
static int efx_test_eventq_irq(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	struct efx_channel *channel;
	unsigned int read_ptr[EFX_MAX_CHANNELS];
	unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
	unsigned long timeout, wait;

	BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);

	efx_for_each_channel(channel, efx) {
		read_ptr[channel->channel] = channel->eventq_read_ptr;
		set_bit(channel->channel, &dma_pend);
		set_bit(channel->channel, &int_pend);
		efx_siena_event_test_start(channel);
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of interrupts.  NAPI processing may or may
	 * not complete in time, but we can cope in any case.
	 */
	do {
		schedule_timeout_uninterruptible(wait);

		efx_for_each_channel(channel, efx) {
			efx_siena_stop_eventq(channel);
			if (channel->eventq_read_ptr !=
			    read_ptr[channel->channel]) {
				set_bit(channel->channel, &napi_ran);
				clear_bit(channel->channel, &dma_pend);
				clear_bit(channel->channel, &int_pend);
			} else {
				if (efx_siena_event_present(channel))
					clear_bit(channel->channel, &dma_pend);
				if (efx_nic_event_test_irq_cpu(channel) >= 0)
					clear_bit(channel->channel, &int_pend);
			}
			efx_siena_start_eventq(channel);
		}

		wait *= 2;
	} while ((dma_pend || int_pend) && time_before(jiffies, timeout));

	efx_for_each_channel(channel, efx) {
		bool dma_seen = !test_bit(channel->channel, &dma_pend);
		bool int_seen = !test_bit(channel->channel, &int_pend);

		tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
		tests->eventq_int[channel->channel] = int_seen ? 1 : -1;

		if (dma_seen && int_seen) {
			netif_dbg(efx, drv, efx->net_dev,
				  "channel %d event queue passed (with%s NAPI)\n",
				  channel->channel,
				  test_bit(channel->channel, &napi_ran) ?
				  "" : "out");
		} else {
			/* Report failure and whether either interrupt or DMA
			 * worked
			 */
			netif_err(efx, drv, efx->net_dev,
				  "channel %d timed out waiting for event queue\n",
				  channel->channel);
			if (int_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d saw interrupt "
					  "during event queue test\n",
					  channel->channel);
			if (dma_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d event was generated, but "
					  "failed to trigger an interrupt\n",
					  channel->channel);
		}
	}

	return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}

static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
			unsigned flags)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	rc = efx_siena_mcdi_phy_run_tests(efx, tests->phy_ext, flags);
	mutex_unlock(&efx->mac_lock);
	if (rc == -EPERM)
		rc = 0;
	else
		netif_info(efx, drv, efx->net_dev,
			   "%s phy selftest\n", rc ? "Failed" : "Passed");

	return rc;
}

/**************************************************************************
 *
 * Loopback testing
 * NB Only one loopback test can be executing concurrently.
 *
 **************************************************************************/

/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 */
void efx_siena_loopback_rx_packet(struct efx_nic *efx,
				  const char *buf_ptr, int pkt_len)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *received;
	struct efx_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

	received = (struct efx_loopback_payload *) buf_ptr;
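	/* The per-packet source address (and, with checksum offload, the IP
	 * checksum) will not match the template payload, so overwrite them
	 * before the header comparisons below.
	 */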
	received->ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received->ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received->header)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw runt RX packet (length %d) in %s loopback "
			  "test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw non-loopback RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != sizeof(*payload)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw incorrect RX packet length %d (wanted %d) in "
			  "%s loopback test\n", pkt_len, (int)sizeof(*payload),
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted IP header in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding matches */
	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received->iteration != payload->iteration) {
		netif_err(efx, drv, efx->net_dev,
			  "saw RX packet from iteration %d (wanted %d) in "
			  "%s loopback test\n", ntohs(received->iteration),
			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	netif_vdbg(efx, drv, efx->net_dev,
		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

 err:
#ifdef DEBUG
	if (atomic_read(&state->rx_bad) == 0) {
		netif_err(efx, drv, efx->net_dev, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload, sizeof(state->payload), 0);
	}
#endif
	atomic_inc(&state->rx_bad);
}

/* Initialise an efx_siena_selftest_state for a new iteration */
static void efx_iterate_state(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct net_device *net_dev = efx->net_dev;
	struct efx_loopback_payload *payload = &state->payload;

	/* Initialise the layerII header */
	ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
	ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
	payload->header.h_proto = htons(ETH_P_IP);

	/* saddr set later and used as incrementing count */
	payload->ip.daddr = htonl(INADDR_LOOPBACK);
	payload->ip.ihl = 5;
	payload->ip.check = (__force __sum16) htons(0xdead);
	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
	payload->ip.version = IPVERSION;
	payload->ip.protocol = IPPROTO_UDP;

	/* Initialise udp header */
	payload->udp.source = 0;
	payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
				 sizeof(struct iphdr));
	payload->udp.check = 0;	/* checksum ignored */

	/* Fill out payload */
	payload->iteration = htons(ntohs(payload->iteration) + 1);
	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

	/* Fill out remaining state members */
	atomic_set(&state->rx_good, 0);
	atomic_set(&state->rx_bad, 0);
	smp_wmb();
}

static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = skb_put(skb, sizeof(state->payload));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->label,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}

static int efx_poll_loopback(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;

	return atomic_read(&state->rx_good) == state->packet_count;
}

static int efx_end_loopback(struct efx_tx_queue *tx_queue,
			    struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be free'd when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
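		/* skb_get() in efx_begin_loopback() took an extra reference,
		 * so the skb is only unshared once the TX completion path has
		 * dropped its own reference.
		 */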
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb(skb);
	}

	netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "TX completion events in %s loopback test\n",
			  tx_queue->label, tx_done, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "received packets in %s loopback test\n",
			  tx_queue->label, rx_good, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->label] += state->packet_count;
	lb_tests->tx_done[tx_queue->label] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}

static int
efx_test_loopback(struct efx_tx_queue *tx_queue,
		  struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	int i, begin_rc, end_rc;

	for (i = 0; i < 3; i++) {
		/* Determine how many packets to send */
		state->packet_count = efx->txq_entries / 3;
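		/* Burst sizes of 1, 16 and 256 packets over the three
		 * iterations, capped at a third of the TX ring.
		 */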
		state->packet_count = min(1 << (i << 2), state->packet_count);
		state->skbs = kcalloc(state->packet_count,
				      sizeof(state->skbs[0]), GFP_KERNEL);
		if (!state->skbs)
			return -ENOMEM;
		state->flush = false;

		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d (hw %d) testing %s loopback with %d packets\n",
			  tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
			  state->packet_count);

		efx_iterate_state(efx);
		begin_rc = efx_begin_loopback(tx_queue);

		/* This will normally complete very quickly, but be
		 * prepared to wait much longer. */
		msleep(1);
		if (!efx_poll_loopback(efx)) {
			msleep(LOOPBACK_TIMEOUT_MS);
			efx_poll_loopback(efx);
		}

		end_rc = efx_end_loopback(tx_queue, lb_tests);
		kfree(state->skbs);

		if (begin_rc || end_rc) {
			/* Wait a while to ensure there are no packets
			 * floating around after a failure. */
			schedule_timeout_uninterruptible(HZ / 10);
			return begin_rc ? begin_rc : end_rc;
		}
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "TX queue %d passed %s loopback test with a burst length "
		  "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
		  state->packet_count);

	return 0;
}

/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
 * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
 * to delay and retry. Therefore, it's safer to just poll directly. Wait
 * for link up and any faults to dissipate. */
static int efx_wait_for_link(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;
	int count, link_up_count = 0;
	bool link_up;

	for (count = 0; count < 40; count++) {
		schedule_timeout_uninterruptible(HZ / 10);

		if (efx->type->monitor != NULL) {
			mutex_lock(&efx->mac_lock);
			efx->type->monitor(efx);
			mutex_unlock(&efx->mac_lock);
		}

		mutex_lock(&efx->mac_lock);
		link_up = link_state->up;
		if (link_up)
			link_up = !efx->type->check_mac_fault(efx);
		mutex_unlock(&efx->mac_lock);

		if (link_up) {
			if (++link_up_count == 2)
				return 0;
		} else {
			link_up_count = 0;
		}
	}

	return -ETIMEDOUT;
}

static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
			      unsigned int loopback_modes)
{
	enum efx_loopback_mode mode;
	struct efx_loopback_state *state;
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	/* Set the port loopback_selftest member. From this point on
	 * all received packets will be dropped. Mark the state as
	 * "flushing" so all inflight packets are dropped */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	BUG_ON(efx->loopback_selftest);
	state->flush = true;
	efx->loopback_selftest = state;

	/* Test all supported loopback modes */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(loopback_modes & (1 << mode)))
			continue;

		/* Move the port into the specified loopback mode. */
		state->flush = true;
		mutex_lock(&efx->mac_lock);
		efx->loopback_mode = mode;
		rc = __efx_siena_reconfigure_port(efx);
		mutex_unlock(&efx->mac_lock);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "unable to move into %s loopback\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		rc = efx_wait_for_link(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "loopback %s never came up\n",
				  LOOPBACK_MODE(efx));
			goto out;
		}

		/* Test all enabled types of TX queue */
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			state->offload_csum = (tx_queue->type &
					       EFX_TXQ_TYPE_OUTER_CSUM);
			rc = efx_test_loopback(tx_queue,
					       &tests->loopback[mode]);
			if (rc)
				goto out;
		}
	}

 out:
	/* Remove the flush. The caller will remove the loopback setting */
	state->flush = true;
	efx->loopback_selftest = NULL;
	wmb();
	kfree(state);

	if (rc == -EPERM)
		rc = 0;

	return rc;
}

/**************************************************************************
 *
 * Entry point
 *
 *************************************************************************/

int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
		       unsigned int flags)
{
	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
	int phy_mode = efx->phy_mode;
	int rc_test = 0, rc_reset, rc;

	efx_siena_selftest_async_cancel(efx);

	/* Online (i.e. non-disruptive) testing
	 * This checks interrupt generation, event delivery and PHY presence. */

	rc = efx_test_phy_alive(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_nvram(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_interrupts(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_eventq_irq(efx, tests);
	if (rc && !rc_test)
		rc_test = rc;

	if (rc_test)
		return rc_test;

	if (!(flags & ETH_TEST_FL_OFFLINE))
		return efx_test_phy(efx, tests, flags);

	/* Offline (i.e. disruptive) testing
	 * This checks MAC and PHY loopback on the specified port. */

	/* Detach the device so the kernel doesn't transmit during the
	 * loopback test and the watchdog timeout doesn't fire.
	 */
	efx_device_detach_sync(efx);

	if (efx->type->test_chip) {
		rc_reset = efx->type->test_chip(efx, tests);
		if (rc_reset) {
			netif_err(efx, hw, efx->net_dev,
				  "Unable to recover from chip test\n");
			efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
			return rc_reset;
		}

		if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
			rc_test = -EIO;
	}

	/* Ensure that the phy is powered and out of loopback
	 * for the bist and loopback tests */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
	efx->loopback_mode = LOOPBACK_NONE;
	__efx_siena_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	rc = efx_test_phy(efx, tests, flags);
	if (rc && !rc_test)
		rc_test = rc;

	rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
	if (rc && !rc_test)
		rc_test = rc;

	/* restore the PHY to the previous state */
	mutex_lock(&efx->mac_lock);
	efx->phy_mode = phy_mode;
	efx->loopback_mode = loopback_mode;
	__efx_siena_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	efx_device_attach_if_not_resetting(efx);

	return rc_test;
}

void efx_siena_selftest_async_start(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_siena_event_test_start(channel);
	schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
}

void efx_siena_selftest_async_cancel(struct efx_nic *efx)
{
	cancel_delayed_work_sync(&efx->selftest_work);
}

static void efx_siena_selftest_async_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   selftest_work.work);
	struct efx_channel *channel;
	int cpu;

	efx_for_each_channel(channel, efx) {
		cpu = efx_nic_event_test_irq_cpu(channel);
		if (cpu < 0)
			netif_err(efx, ifup, efx->net_dev,
				  "channel %d failed to trigger an interrupt\n",
				  channel->channel);
		else
			netif_dbg(efx, ifup, efx->net_dev,
				  "channel %d triggered interrupt on CPU %d\n",
				  channel->channel, cpu);
	}
}

void efx_siena_selftest_async_init(struct efx_nic *efx)
{
	INIT_DELAYED_WORK(&efx->selftest_work, efx_siena_selftest_async_work);
}