cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

rx_common.c (31591B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/****************************************************************************
      3 * Driver for Solarflare network controllers and boards
      4 * Copyright 2018 Solarflare Communications Inc.
      5 *
      6 * This program is free software; you can redistribute it and/or modify it
      7 * under the terms of the GNU General Public License version 2 as published
      8 * by the Free Software Foundation, incorporated herein by reference.
      9 */
     10
     11#include "net_driver.h"
     12#include <linux/module.h>
     13#include <linux/iommu.h>
     14#include "efx.h"
     15#include "nic.h"
     16#include "rx_common.h"
     17
     18/* This is the percentage fill level below which new RX descriptors
     19 * will be added to the RX descriptor ring.
     20 */
     21static unsigned int rx_refill_threshold;
     22module_param(rx_refill_threshold, uint, 0444);
     23MODULE_PARM_DESC(rx_refill_threshold,
     24		 "RX descriptor ring refill threshold (%)");
     25
     26/* RX maximum head room required.
     27 *
     28 * This must be at least 1 to prevent overflow, plus one packet-worth
     29 * to allow pipelined receives.
     30 */
     31#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
     32
     33static void efx_unmap_rx_buffer(struct efx_nic *efx,
     34				struct efx_rx_buffer *rx_buf);
     35
     36/* Check the RX page recycle ring for a page that can be reused. */
     37static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
     38{
     39	struct efx_nic *efx = rx_queue->efx;
     40	struct efx_rx_page_state *state;
     41	unsigned int index;
     42	struct page *page;
     43
     44	if (unlikely(!rx_queue->page_ring))
     45		return NULL;
     46	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
     47	page = rx_queue->page_ring[index];
     48	if (page == NULL)
     49		return NULL;
     50
     51	rx_queue->page_ring[index] = NULL;
     52	/* page_remove cannot exceed page_add. */
     53	if (rx_queue->page_remove != rx_queue->page_add)
     54		++rx_queue->page_remove;
     55
     56	/* If page_count is 1 then we hold the only reference to this page. */
     57	if (page_count(page) == 1) {
     58		++rx_queue->page_recycle_count;
     59		return page;
     60	} else {
     61		state = page_address(page);
     62		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
     63			       PAGE_SIZE << efx->rx_buffer_order,
     64			       DMA_FROM_DEVICE);
     65		put_page(page);
     66		++rx_queue->page_recycle_failed;
     67	}
     68
     69	return NULL;
     70}
     71
     72/* Attempt to recycle the page if there is an RX recycle ring; the page can
     73 * only be added if this is the final RX buffer, to prevent pages being used in
     74 * the descriptor ring and appearing in the recycle ring simultaneously.
     75 */
     76static void efx_recycle_rx_page(struct efx_channel *channel,
     77				struct efx_rx_buffer *rx_buf)
     78{
     79	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
     80	struct efx_nic *efx = rx_queue->efx;
     81	struct page *page = rx_buf->page;
     82	unsigned int index;
     83
     84	/* Only recycle the page after processing the final buffer. */
     85	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
     86		return;
     87
     88	index = rx_queue->page_add & rx_queue->page_ptr_mask;
     89	if (rx_queue->page_ring[index] == NULL) {
     90		unsigned int read_index = rx_queue->page_remove &
     91			rx_queue->page_ptr_mask;
     92
     93		/* The next slot in the recycle ring is available, but
     94		 * increment page_remove if the read pointer currently
     95		 * points here.
     96		 */
     97		if (read_index == index)
     98			++rx_queue->page_remove;
     99		rx_queue->page_ring[index] = page;
    100		++rx_queue->page_add;
    101		return;
    102	}
    103	++rx_queue->page_recycle_full;
    104	efx_unmap_rx_buffer(efx, rx_buf);
    105	put_page(rx_buf->page);
    106}
    107
    108/* Recycle the pages that are used by buffers that have just been received. */
    109void efx_siena_recycle_rx_pages(struct efx_channel *channel,
    110				struct efx_rx_buffer *rx_buf,
    111				unsigned int n_frags)
    112{
    113	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
    114
    115	if (unlikely(!rx_queue->page_ring))
    116		return;
    117
    118	do {
    119		efx_recycle_rx_page(channel, rx_buf);
    120		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
    121	} while (--n_frags);
    122}
    123
    124void efx_siena_discard_rx_packet(struct efx_channel *channel,
    125				 struct efx_rx_buffer *rx_buf,
    126				 unsigned int n_frags)
    127{
    128	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
    129
    130	efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);
    131
    132	efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
    133}
    134
    135static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
    136{
    137	unsigned int bufs_in_recycle_ring, page_ring_size;
    138	struct efx_nic *efx = rx_queue->efx;
    139
    140	bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
    141	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
    142					    efx->rx_bufs_per_page);
    143	rx_queue->page_ring = kcalloc(page_ring_size,
    144				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
    145	if (!rx_queue->page_ring)
    146		rx_queue->page_ptr_mask = 0;
    147	else
    148		rx_queue->page_ptr_mask = page_ring_size - 1;
    149}
    150
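The recycle ring is sized up to a power of two so that the free-running page_add/page_remove counters can be turned into ring slots with a single AND against page_ptr_mask. A minimal standalone sketch of that indexing idiom follows; it is plain userspace C with made-up counter values, not driver code.

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int ring_size = 16;                  /* must be a power of two */
	unsigned int mask = ring_size - 1;            /* plays the role of page_ptr_mask */
	unsigned int page_add = 37, page_remove = 30; /* free-running counters */

	/* With a power-of-two size, "counter & mask" equals "counter % ring_size". */
	assert((page_add & mask) == page_add % ring_size);

	printf("write slot %u, read slot %u, pages in ring %u\n",
	       page_add & mask, page_remove & mask, page_add - page_remove);
	return 0;
}
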
    151static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
    152{
    153	struct efx_nic *efx = rx_queue->efx;
    154	int i;
    155
    156	if (unlikely(!rx_queue->page_ring))
    157		return;
    158
    159	/* Unmap and release the pages in the recycle ring. Remove the ring. */
    160	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
    161		struct page *page = rx_queue->page_ring[i];
    162		struct efx_rx_page_state *state;
    163
    164		if (page == NULL)
    165			continue;
    166
    167		state = page_address(page);
    168		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
    169			       PAGE_SIZE << efx->rx_buffer_order,
    170			       DMA_FROM_DEVICE);
    171		put_page(page);
    172	}
    173	kfree(rx_queue->page_ring);
    174	rx_queue->page_ring = NULL;
    175}
    176
    177static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
    178			       struct efx_rx_buffer *rx_buf)
    179{
    180	/* Release the page reference we hold for the buffer. */
    181	if (rx_buf->page)
    182		put_page(rx_buf->page);
    183
    184	/* If this is the last buffer in a page, unmap and free it. */
    185	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
    186		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
    187		efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
    188	}
    189	rx_buf->page = NULL;
    190}
    191
    192int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue)
    193{
    194	struct efx_nic *efx = rx_queue->efx;
    195	unsigned int entries;
    196	int rc;
    197
    198	/* Create the smallest power-of-two aligned ring */
    199	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
    200	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
    201	rx_queue->ptr_mask = entries - 1;
    202
    203	netif_dbg(efx, probe, efx->net_dev,
    204		  "creating RX queue %d size %#x mask %#x\n",
    205		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
    206		  rx_queue->ptr_mask);
    207
    208	/* Allocate RX buffers */
    209	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
    210				   GFP_KERNEL);
    211	if (!rx_queue->buffer)
    212		return -ENOMEM;
    213
    214	rc = efx_nic_probe_rx(rx_queue);
    215	if (rc) {
    216		kfree(rx_queue->buffer);
    217		rx_queue->buffer = NULL;
    218	}
    219
    220	return rc;
    221}
    222
    223void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue)
    224{
    225	unsigned int max_fill, trigger, max_trigger;
    226	struct efx_nic *efx = rx_queue->efx;
    227	int rc = 0;
    228
    229	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
    230		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
    231
    232	/* Initialise ptr fields */
    233	rx_queue->added_count = 0;
    234	rx_queue->notified_count = 0;
    235	rx_queue->removed_count = 0;
    236	rx_queue->min_fill = -1U;
    237	efx_init_rx_recycle_ring(rx_queue);
    238
    239	rx_queue->page_remove = 0;
    240	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
    241	rx_queue->page_recycle_count = 0;
    242	rx_queue->page_recycle_failed = 0;
    243	rx_queue->page_recycle_full = 0;
    244
    245	/* Initialise limit fields */
    246	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
    247	max_trigger =
    248		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
    249	if (rx_refill_threshold != 0) {
    250		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
    251		if (trigger > max_trigger)
    252			trigger = max_trigger;
    253	} else {
    254		trigger = max_trigger;
    255	}
    256
    257	rx_queue->max_fill = max_fill;
    258	rx_queue->fast_fill_trigger = trigger;
    259	rx_queue->refill_enabled = true;
    260
    261	/* Initialise XDP queue information */
    262	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
    263			      rx_queue->core_index, 0);
    264
    265	if (rc) {
    266		netif_err(efx, rx_err, efx->net_dev,
    267			  "Failure to initialise XDP queue information rc=%d\n",
    268			  rc);
    269		efx->xdp_rxq_info_failed = true;
    270	} else {
    271		rx_queue->xdp_rxq_info_valid = true;
    272	}
    273
    274	/* Set up RX descriptor ring */
    275	efx_nic_init_rx(rx_queue);
    276}
    277
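The refill trigger set up above is easiest to follow with concrete numbers. The sketch below redoes the max_fill/trigger arithmetic in plain userspace C; the ring size, the buffers-per-page figures and the EFX_RX_MAX_FRAGS value are all invented for illustration (the real ones come from the driver and its headers).

#include <stdio.h>

#define EFX_RX_MAX_FRAGS 4                        /* assumed for illustration */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

int main(void)
{
	unsigned int rxq_entries = 1024;          /* assumed ring size */
	unsigned int rx_pages_per_batch = 4, rx_bufs_per_page = 2;
	unsigned int rx_refill_threshold = 90;    /* module parameter, in percent */
	unsigned int max_fill, max_trigger, trigger;

	max_fill = rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger = max_fill - rx_pages_per_batch * rx_bufs_per_page;
	if (rx_refill_threshold) {
		trigger = max_fill *
			  (rx_refill_threshold < 100 ? rx_refill_threshold : 100) / 100;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	/* e.g. max_fill=1019 fast_fill_trigger=917 for the values above */
	printf("max_fill=%u fast_fill_trigger=%u\n", max_fill, trigger);
	return 0;
}
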
    278void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
    279{
    280	struct efx_rx_buffer *rx_buf;
    281	int i;
    282
    283	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
    284		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
    285
    286	del_timer_sync(&rx_queue->slow_fill);
    287
    288	/* Release RX buffers from the current read ptr to the write ptr */
    289	if (rx_queue->buffer) {
    290		for (i = rx_queue->removed_count; i < rx_queue->added_count;
    291		     i++) {
    292			unsigned int index = i & rx_queue->ptr_mask;
    293
    294			rx_buf = efx_rx_buffer(rx_queue, index);
    295			efx_fini_rx_buffer(rx_queue, rx_buf);
    296		}
    297	}
    298
    299	efx_fini_rx_recycle_ring(rx_queue);
    300
    301	if (rx_queue->xdp_rxq_info_valid)
    302		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
    303
    304	rx_queue->xdp_rxq_info_valid = false;
    305}
    306
    307void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue)
    308{
    309	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
    310		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
    311
    312	efx_nic_remove_rx(rx_queue);
    313
    314	kfree(rx_queue->buffer);
    315	rx_queue->buffer = NULL;
    316}
    317
    318/* Unmap a DMA-mapped page.  This function is only called for the final RX
    319 * buffer in a page.
    320 */
    321static void efx_unmap_rx_buffer(struct efx_nic *efx,
    322				struct efx_rx_buffer *rx_buf)
    323{
    324	struct page *page = rx_buf->page;
    325
    326	if (page) {
    327		struct efx_rx_page_state *state = page_address(page);
    328
    329		dma_unmap_page(&efx->pci_dev->dev,
    330			       state->dma_addr,
    331			       PAGE_SIZE << efx->rx_buffer_order,
    332			       DMA_FROM_DEVICE);
    333	}
    334}
    335
    336void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
    337			       struct efx_rx_buffer *rx_buf,
    338			       unsigned int num_bufs)
    339{
    340	do {
    341		if (rx_buf->page) {
    342			put_page(rx_buf->page);
    343			rx_buf->page = NULL;
    344		}
    345		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
    346	} while (--num_bufs);
    347}
    348
    349void efx_siena_rx_slow_fill(struct timer_list *t)
    350{
    351	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
    352
    353	/* Post an event to cause NAPI to run and refill the queue */
    354	efx_nic_generate_fill_event(rx_queue);
    355	++rx_queue->slow_fill_count;
    356}
    357
    358static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
    359{
    360	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
    361}
    362
     363/* efx_init_rx_buffers - create a batch of page-based RX buffers
    364 *
    365 * @rx_queue:		Efx RX queue
    366 *
    367 * This allocates a batch of pages, maps them for DMA, and populates
    368 * struct efx_rx_buffers for each one. Return a negative error code or
    369 * 0 on success. If a single page can be used for multiple buffers,
    370 * then the page will either be inserted fully, or not at all.
    371 */
    372static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
    373{
    374	unsigned int page_offset, index, count;
    375	struct efx_nic *efx = rx_queue->efx;
    376	struct efx_rx_page_state *state;
    377	struct efx_rx_buffer *rx_buf;
    378	dma_addr_t dma_addr;
    379	struct page *page;
    380
    381	count = 0;
    382	do {
    383		page = efx_reuse_page(rx_queue);
    384		if (page == NULL) {
    385			page = alloc_pages(__GFP_COMP |
    386					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
    387					   efx->rx_buffer_order);
    388			if (unlikely(page == NULL))
    389				return -ENOMEM;
    390			dma_addr =
    391				dma_map_page(&efx->pci_dev->dev, page, 0,
    392					     PAGE_SIZE << efx->rx_buffer_order,
    393					     DMA_FROM_DEVICE);
    394			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
    395						       dma_addr))) {
    396				__free_pages(page, efx->rx_buffer_order);
    397				return -EIO;
    398			}
    399			state = page_address(page);
    400			state->dma_addr = dma_addr;
    401		} else {
    402			state = page_address(page);
    403			dma_addr = state->dma_addr;
    404		}
    405
    406		dma_addr += sizeof(struct efx_rx_page_state);
    407		page_offset = sizeof(struct efx_rx_page_state);
    408
    409		do {
    410			index = rx_queue->added_count & rx_queue->ptr_mask;
    411			rx_buf = efx_rx_buffer(rx_queue, index);
    412			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
    413					   EFX_XDP_HEADROOM;
    414			rx_buf->page = page;
    415			rx_buf->page_offset = page_offset + efx->rx_ip_align +
    416					      EFX_XDP_HEADROOM;
    417			rx_buf->len = efx->rx_dma_len;
    418			rx_buf->flags = 0;
    419			++rx_queue->added_count;
    420			get_page(page);
    421			dma_addr += efx->rx_page_buf_step;
    422			page_offset += efx->rx_page_buf_step;
    423		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
    424
    425		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
    426	} while (++count < efx->rx_pages_per_batch);
    427
    428	return 0;
    429}
    430
    431void efx_siena_rx_config_page_split(struct efx_nic *efx)
    432{
    433	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
    434				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
    435				      EFX_RX_BUF_ALIGNMENT);
    436	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
    437		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
    438		efx->rx_page_buf_step);
    439	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
    440		efx->rx_bufs_per_page;
    441	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
    442					       efx->rx_bufs_per_page);
    443}
    444
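To see what this page-split computation produces in practice, here is a rough standalone rework of the same arithmetic. The alignment, XDP headroom/tailroom and page-state sizes are assumed values picked for illustration, not the driver's actual constants.

#include <stdio.h>

#define PAGE_SZ			4096u
#define RX_BUF_ALIGNMENT	64u	/* assumed; EFX_RX_BUF_ALIGNMENT in the driver */
#define XDP_HEADROOM		0u	/* assumed: XDP headroom disabled */
#define XDP_TAILROOM		0u	/* assumed: XDP tailroom disabled */
#define PAGE_STATE_SZ		16u	/* assumed sizeof(struct efx_rx_page_state) */

static unsigned int align_up(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);	/* a must be a power of two */
}

int main(void)
{
	unsigned int rx_dma_len = 1536, rx_ip_align = 2, rx_buffer_order = 0;
	unsigned int step, bufs_per_page, truesize;

	step = align_up(rx_dma_len + rx_ip_align + XDP_HEADROOM + XDP_TAILROOM,
			RX_BUF_ALIGNMENT);
	bufs_per_page = rx_buffer_order ? 1 : (PAGE_SZ - PAGE_STATE_SZ) / step;
	truesize = (PAGE_SZ << rx_buffer_order) / bufs_per_page;

	/* prints: buf step 1600, bufs/page 2, truesize 2048 */
	printf("buf step %u, bufs/page %u, truesize %u\n",
	       step, bufs_per_page, truesize);
	return 0;
}
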
    445/* efx_siena_fast_push_rx_descriptors - push new RX descriptors quickly
    446 * @rx_queue:		RX descriptor queue
    447 *
    448 * This will aim to fill the RX descriptor queue up to
     449 * @rx_queue->max_fill. If there is insufficient atomic
    450 * memory to do so, a slow fill will be scheduled.
    451 *
     452 * The caller must provide serialisation (none is used here). In practice,
    453 * this means this function must run from the NAPI handler, or be called
    454 * when NAPI is disabled.
    455 */
    456void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
    457					bool atomic)
    458{
    459	struct efx_nic *efx = rx_queue->efx;
    460	unsigned int fill_level, batch_size;
    461	int space, rc = 0;
    462
    463	if (!rx_queue->refill_enabled)
    464		return;
    465
    466	/* Calculate current fill level, and exit if we don't need to fill */
    467	fill_level = (rx_queue->added_count - rx_queue->removed_count);
    468	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
    469	if (fill_level >= rx_queue->fast_fill_trigger)
    470		goto out;
    471
    472	/* Record minimum fill level */
    473	if (unlikely(fill_level < rx_queue->min_fill)) {
    474		if (fill_level)
    475			rx_queue->min_fill = fill_level;
    476	}
    477
    478	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
    479	space = rx_queue->max_fill - fill_level;
    480	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
    481
    482	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
    483		   "RX queue %d fast-filling descriptor ring from"
    484		   " level %d to level %d\n",
    485		   efx_rx_queue_index(rx_queue), fill_level,
    486		   rx_queue->max_fill);
    487
    488	do {
    489		rc = efx_init_rx_buffers(rx_queue, atomic);
    490		if (unlikely(rc)) {
    491			/* Ensure that we don't leave the rx queue empty */
    492			efx_schedule_slow_fill(rx_queue);
    493			goto out;
    494		}
    495	} while ((space -= batch_size) >= batch_size);
    496
    497	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
    498		   "RX queue %d fast-filled descriptor ring "
    499		   "to level %d\n", efx_rx_queue_index(rx_queue),
    500		   rx_queue->added_count - rx_queue->removed_count);
    501
    502 out:
    503	if (rx_queue->notified_count != rx_queue->added_count)
    504		efx_nic_notify_rx_desc(rx_queue);
    505}
    506
    507/* Pass a received packet up through GRO.  GRO can handle pages
    508 * regardless of checksum state and skbs with a good checksum.
    509 */
    510void
    511efx_siena_rx_packet_gro(struct efx_channel *channel,
    512			struct efx_rx_buffer *rx_buf,
    513			unsigned int n_frags, u8 *eh, __wsum csum)
    514{
    515	struct napi_struct *napi = &channel->napi_str;
    516	struct efx_nic *efx = channel->efx;
    517	struct sk_buff *skb;
    518
    519	skb = napi_get_frags(napi);
    520	if (unlikely(!skb)) {
    521		struct efx_rx_queue *rx_queue;
    522
    523		rx_queue = efx_channel_get_rx_queue(channel);
    524		efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
    525		return;
    526	}
    527
    528	if (efx->net_dev->features & NETIF_F_RXHASH)
    529		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
    530			     PKT_HASH_TYPE_L3);
    531	if (csum) {
    532		skb->csum = csum;
    533		skb->ip_summed = CHECKSUM_COMPLETE;
    534	} else {
    535		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
    536				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
    537	}
    538	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
    539
    540	for (;;) {
    541		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
    542				   rx_buf->page, rx_buf->page_offset,
    543				   rx_buf->len);
    544		rx_buf->page = NULL;
    545		skb->len += rx_buf->len;
    546		if (skb_shinfo(skb)->nr_frags == n_frags)
    547			break;
    548
    549		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
    550	}
    551
    552	skb->data_len = skb->len;
    553	skb->truesize += n_frags * efx->rx_buffer_truesize;
    554
    555	skb_record_rx_queue(skb, channel->rx_queue.core_index);
    556
    557	napi_gro_frags(napi);
    558}
    559
    560/* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
    561 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
    562 */
    563struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx)
    564{
    565	struct list_head *head = &efx->rss_context.list;
    566	struct efx_rss_context *ctx, *new;
    567	u32 id = 1; /* Don't use zero, that refers to the master RSS context */
    568
    569	WARN_ON(!mutex_is_locked(&efx->rss_lock));
    570
    571	/* Search for first gap in the numbering */
    572	list_for_each_entry(ctx, head, list) {
    573		if (ctx->user_id != id)
    574			break;
    575		id++;
    576		/* Check for wrap.  If this happens, we have nearly 2^32
    577		 * allocated RSS contexts, which seems unlikely.
    578		 */
    579		if (WARN_ON_ONCE(!id))
    580			return NULL;
    581	}
    582
    583	/* Create the new entry */
    584	new = kmalloc(sizeof(*new), GFP_KERNEL);
    585	if (!new)
    586		return NULL;
    587	new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
    588	new->rx_hash_udp_4tuple = false;
    589
    590	/* Insert the new entry into the gap */
    591	new->user_id = id;
    592	list_add_tail(&new->list, &ctx->list);
    593	return new;
    594}
    595
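The allocator above picks the lowest unused user_id by walking the list, which is kept sorted by id, until the numbering skips a value. The same first-gap search, sketched standalone over a plain sorted array rather than the driver's linked list:

#include <stdio.h>

int main(void)
{
	/* user_ids already allocated, kept sorted (1 is the first valid id) */
	unsigned int used[] = { 1, 2, 3, 5, 6 };
	unsigned int n = sizeof(used) / sizeof(used[0]);
	unsigned int id = 1, i;

	for (i = 0; i < n; i++) {
		if (used[i] != id)
			break;		/* found a gap in the numbering */
		id++;
	}
	printf("next free RSS context user_id: %u\n", id);	/* prints 4 */
	return 0;
}
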
    596struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
    597							 u32 id)
    598{
    599	struct list_head *head = &efx->rss_context.list;
    600	struct efx_rss_context *ctx;
    601
    602	WARN_ON(!mutex_is_locked(&efx->rss_lock));
    603
    604	list_for_each_entry(ctx, head, list)
    605		if (ctx->user_id == id)
    606			return ctx;
    607	return NULL;
    608}
    609
    610void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx)
    611{
    612	list_del(&ctx->list);
    613	kfree(ctx);
    614}
    615
    616void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
    617					  struct efx_rss_context *ctx)
    618{
    619	size_t i;
    620
    621	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
    622		ctx->rx_indir_table[i] =
    623			ethtool_rxfh_indir_default(i, efx->rss_spread);
    624}
    625
    626/**
    627 * efx_siena_filter_is_mc_recipient - test whether spec is a multicast recipient
    628 * @spec: Specification to test
    629 *
    630 * Return: %true if the specification is a non-drop RX filter that
    631 * matches a local MAC address I/G bit value of 1 or matches a local
    632 * IPv4 or IPv6 address value in the respective multicast address
    633 * range.  Otherwise %false.
    634 */
    635bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec)
    636{
    637	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
    638	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
    639		return false;
    640
    641	if (spec->match_flags &
    642	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
    643	    is_multicast_ether_addr(spec->loc_mac))
    644		return true;
    645
    646	if ((spec->match_flags &
    647	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
    648	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
    649		if (spec->ether_type == htons(ETH_P_IP) &&
    650		    ipv4_is_multicast(spec->loc_host[0]))
    651			return true;
    652		if (spec->ether_type == htons(ETH_P_IPV6) &&
    653		    ((const u8 *)spec->loc_host)[0] == 0xff)
    654			return true;
    655	}
    656
    657	return false;
    658}
    659
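Both multicast tests reduce to simple address-format checks: IPv4 multicast is the 224.0.0.0/4 range (top nibble 0xe), and an IPv6 multicast address starts with an 0xff byte. A quick userspace illustration using ordinary socket headers rather than the kernel helpers:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
	struct in_addr v4;
	unsigned char v6[16] = { 0xff, 0x02 };	/* e.g. ff02::..., rest zero */

	inet_pton(AF_INET, "239.1.2.3", &v4);

	/* same test as ipv4_is_multicast(): top four address bits are 1110 */
	printf("IPv4 multicast: %d\n",
	       (v4.s_addr & htonl(0xf0000000)) == htonl(0xe0000000));
	/* same test as the loc_host first-byte check above */
	printf("IPv6 multicast: %d\n", v6[0] == 0xff);
	return 0;
}
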
    660bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
    661				 const struct efx_filter_spec *right)
    662{
    663	if ((left->match_flags ^ right->match_flags) |
    664	    ((left->flags ^ right->flags) &
    665	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
    666		return false;
    667
    668	return memcmp(&left->outer_vid, &right->outer_vid,
    669		      sizeof(struct efx_filter_spec) -
    670		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
    671}
    672
    673u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec)
    674{
    675	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
    676	return jhash2((const u32 *)&spec->outer_vid,
    677		      (sizeof(struct efx_filter_spec) -
    678		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
    679		      0);
    680}
    681
    682#ifdef CONFIG_RFS_ACCEL
    683bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
    684			      unsigned int filter_idx, bool *force)
    685{
    686	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
    687		/* ARFS is currently updating this entry, leave it */
    688		return false;
    689	}
    690	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
    691		/* ARFS tried and failed to update this, so it's probably out
    692		 * of date.  Remove the filter and the ARFS rule entry.
    693		 */
    694		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
    695		*force = true;
    696		return true;
    697	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
    698		/* ARFS has moved on, so old filter is not needed.  Since we did
    699		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
    700		 * not be removed by efx_siena_rps_hash_del() subsequently.
    701		 */
    702		*force = true;
    703		return true;
    704	}
    705	/* Remove it iff ARFS wants to. */
    706	return true;
    707}
    708
    709static
    710struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
    711				       const struct efx_filter_spec *spec)
    712{
    713	u32 hash = efx_siena_filter_spec_hash(spec);
    714
    715	lockdep_assert_held(&efx->rps_hash_lock);
    716	if (!efx->rps_hash_table)
    717		return NULL;
    718	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
    719}
    720
    721struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
    722					const struct efx_filter_spec *spec)
    723{
    724	struct efx_arfs_rule *rule;
    725	struct hlist_head *head;
    726	struct hlist_node *node;
    727
    728	head = efx_rps_hash_bucket(efx, spec);
    729	if (!head)
    730		return NULL;
    731	hlist_for_each(node, head) {
    732		rule = container_of(node, struct efx_arfs_rule, node);
    733		if (efx_siena_filter_spec_equal(spec, &rule->spec))
    734			return rule;
    735	}
    736	return NULL;
    737}
    738
    739static struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
    740					const struct efx_filter_spec *spec,
    741					bool *new)
    742{
    743	struct efx_arfs_rule *rule;
    744	struct hlist_head *head;
    745	struct hlist_node *node;
    746
    747	head = efx_rps_hash_bucket(efx, spec);
    748	if (!head)
    749		return NULL;
    750	hlist_for_each(node, head) {
    751		rule = container_of(node, struct efx_arfs_rule, node);
    752		if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
    753			*new = false;
    754			return rule;
    755		}
    756	}
    757	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
    758	*new = true;
    759	if (rule) {
    760		memcpy(&rule->spec, spec, sizeof(rule->spec));
    761		hlist_add_head(&rule->node, head);
    762	}
    763	return rule;
    764}
    765
    766void efx_siena_rps_hash_del(struct efx_nic *efx,
    767			    const struct efx_filter_spec *spec)
    768{
    769	struct efx_arfs_rule *rule;
    770	struct hlist_head *head;
    771	struct hlist_node *node;
    772
    773	head = efx_rps_hash_bucket(efx, spec);
    774	if (WARN_ON(!head))
    775		return;
    776	hlist_for_each(node, head) {
    777		rule = container_of(node, struct efx_arfs_rule, node);
    778		if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
    779			/* Someone already reused the entry.  We know that if
    780			 * this check doesn't fire (i.e. filter_id == REMOVING)
    781			 * then the REMOVING mark was put there by our caller,
     782 * because the caller holds a lock on the filter table and
    783			 * only holders of that lock set REMOVING.
    784			 */
    785			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
    786				return;
    787			hlist_del(node);
    788			kfree(rule);
    789			return;
    790		}
    791	}
    792	/* We didn't find it. */
    793	WARN_ON(1);
    794}
    795#endif
    796
    797int efx_siena_probe_filters(struct efx_nic *efx)
    798{
    799	int rc;
    800
    801	mutex_lock(&efx->mac_lock);
    802	down_write(&efx->filter_sem);
    803	rc = efx->type->filter_table_probe(efx);
    804	if (rc)
    805		goto out_unlock;
    806
    807#ifdef CONFIG_RFS_ACCEL
    808	if (efx->type->offload_features & NETIF_F_NTUPLE) {
    809		struct efx_channel *channel;
    810		int i, success = 1;
    811
    812		efx_for_each_channel(channel, efx) {
    813			channel->rps_flow_id =
    814				kcalloc(efx->type->max_rx_ip_filters,
    815					sizeof(*channel->rps_flow_id),
    816					GFP_KERNEL);
    817			if (!channel->rps_flow_id)
    818				success = 0;
    819			else
    820				for (i = 0;
    821				     i < efx->type->max_rx_ip_filters;
    822				     ++i)
    823					channel->rps_flow_id[i] =
    824						RPS_FLOW_ID_INVALID;
    825			channel->rfs_expire_index = 0;
    826			channel->rfs_filter_count = 0;
    827		}
    828
    829		if (!success) {
    830			efx_for_each_channel(channel, efx)
    831				kfree(channel->rps_flow_id);
    832			efx->type->filter_table_remove(efx);
    833			rc = -ENOMEM;
    834			goto out_unlock;
    835		}
    836	}
    837#endif
    838out_unlock:
    839	up_write(&efx->filter_sem);
    840	mutex_unlock(&efx->mac_lock);
    841	return rc;
    842}
    843
    844void efx_siena_remove_filters(struct efx_nic *efx)
    845{
    846#ifdef CONFIG_RFS_ACCEL
    847	struct efx_channel *channel;
    848
    849	efx_for_each_channel(channel, efx) {
    850		cancel_delayed_work_sync(&channel->filter_work);
    851		kfree(channel->rps_flow_id);
    852		channel->rps_flow_id = NULL;
    853	}
    854#endif
    855	down_write(&efx->filter_sem);
    856	efx->type->filter_table_remove(efx);
    857	up_write(&efx->filter_sem);
    858}
    859
    860#ifdef CONFIG_RFS_ACCEL
    861
    862static void efx_filter_rfs_work(struct work_struct *data)
    863{
    864	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
    865							      work);
    866	struct efx_nic *efx = netdev_priv(req->net_dev);
    867	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
    868	int slot_idx = req - efx->rps_slot;
    869	struct efx_arfs_rule *rule;
    870	u16 arfs_id = 0;
    871	int rc;
    872
    873	rc = efx->type->filter_insert(efx, &req->spec, true);
    874	if (rc >= 0)
    875		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
    876		rc %= efx->type->max_rx_ip_filters;
    877	if (efx->rps_hash_table) {
    878		spin_lock_bh(&efx->rps_hash_lock);
    879		rule = efx_siena_rps_hash_find(efx, &req->spec);
    880		/* The rule might have already gone, if someone else's request
    881		 * for the same spec was already worked and then expired before
    882		 * we got around to our work.  In that case we have nothing
    883		 * tying us to an arfs_id, meaning that as soon as the filter
    884		 * is considered for expiry it will be removed.
    885		 */
    886		if (rule) {
    887			if (rc < 0)
    888				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
    889			else
    890				rule->filter_id = rc;
    891			arfs_id = rule->arfs_id;
    892		}
    893		spin_unlock_bh(&efx->rps_hash_lock);
    894	}
    895	if (rc >= 0) {
    896		/* Remember this so we can check whether to expire the filter
    897		 * later.
    898		 */
    899		mutex_lock(&efx->rps_mutex);
    900		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
    901			channel->rfs_filter_count++;
    902		channel->rps_flow_id[rc] = req->flow_id;
    903		mutex_unlock(&efx->rps_mutex);
    904
    905		if (req->spec.ether_type == htons(ETH_P_IP))
    906			netif_info(efx, rx_status, efx->net_dev,
    907				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
    908				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
    909				   req->spec.rem_host, ntohs(req->spec.rem_port),
    910				   req->spec.loc_host, ntohs(req->spec.loc_port),
    911				   req->rxq_index, req->flow_id, rc, arfs_id);
    912		else
    913			netif_info(efx, rx_status, efx->net_dev,
    914				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
    915				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
    916				   req->spec.rem_host, ntohs(req->spec.rem_port),
    917				   req->spec.loc_host, ntohs(req->spec.loc_port),
    918				   req->rxq_index, req->flow_id, rc, arfs_id);
    919		channel->n_rfs_succeeded++;
    920	} else {
    921		if (req->spec.ether_type == htons(ETH_P_IP))
    922			netif_dbg(efx, rx_status, efx->net_dev,
    923				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
    924				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
    925				  req->spec.rem_host, ntohs(req->spec.rem_port),
    926				  req->spec.loc_host, ntohs(req->spec.loc_port),
    927				  req->rxq_index, req->flow_id, rc, arfs_id);
    928		else
    929			netif_dbg(efx, rx_status, efx->net_dev,
    930				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
    931				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
    932				  req->spec.rem_host, ntohs(req->spec.rem_port),
    933				  req->spec.loc_host, ntohs(req->spec.loc_port),
    934				  req->rxq_index, req->flow_id, rc, arfs_id);
    935		channel->n_rfs_failed++;
    936		/* We're overloading the NIC's filter tables, so let's do a
    937		 * chunk of extra expiry work.
    938		 */
    939		__efx_siena_filter_rfs_expire(channel,
    940					      min(channel->rfs_filter_count,
    941						  100u));
    942	}
    943
    944	/* Release references */
    945	clear_bit(slot_idx, &efx->rps_slot_map);
    946	dev_put(req->net_dev);
    947}
    948
    949int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
    950			 u16 rxq_index, u32 flow_id)
    951{
    952	struct efx_nic *efx = netdev_priv(net_dev);
    953	struct efx_async_filter_insertion *req;
    954	struct efx_arfs_rule *rule;
    955	struct flow_keys fk;
    956	int slot_idx;
    957	bool new;
    958	int rc;
    959
    960	/* find a free slot */
    961	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
    962		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
    963			break;
    964	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
    965		return -EBUSY;
    966
    967	if (flow_id == RPS_FLOW_ID_INVALID) {
    968		rc = -EINVAL;
    969		goto out_clear;
    970	}
    971
    972	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
    973		rc = -EPROTONOSUPPORT;
    974		goto out_clear;
    975	}
    976
    977	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
    978		rc = -EPROTONOSUPPORT;
    979		goto out_clear;
    980	}
    981	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
    982		rc = -EPROTONOSUPPORT;
    983		goto out_clear;
    984	}
    985
    986	req = efx->rps_slot + slot_idx;
    987	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
    988			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
    989			   rxq_index);
    990	req->spec.match_flags =
    991		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
    992		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
    993		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
    994	req->spec.ether_type = fk.basic.n_proto;
    995	req->spec.ip_proto = fk.basic.ip_proto;
    996
    997	if (fk.basic.n_proto == htons(ETH_P_IP)) {
    998		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
    999		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
   1000	} else {
   1001		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
   1002		       sizeof(struct in6_addr));
   1003		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
   1004		       sizeof(struct in6_addr));
   1005	}
   1006
   1007	req->spec.rem_port = fk.ports.src;
   1008	req->spec.loc_port = fk.ports.dst;
   1009
   1010	if (efx->rps_hash_table) {
   1011		/* Add it to ARFS hash table */
   1012		spin_lock(&efx->rps_hash_lock);
   1013		rule = efx_rps_hash_add(efx, &req->spec, &new);
   1014		if (!rule) {
   1015			rc = -ENOMEM;
   1016			goto out_unlock;
   1017		}
   1018		if (new)
   1019			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
   1020		rc = rule->arfs_id;
   1021		/* Skip if existing or pending filter already does the right thing */
   1022		if (!new && rule->rxq_index == rxq_index &&
   1023		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
   1024			goto out_unlock;
   1025		rule->rxq_index = rxq_index;
   1026		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
   1027		spin_unlock(&efx->rps_hash_lock);
   1028	} else {
   1029		/* Without an ARFS hash table, we just use arfs_id 0 for all
   1030		 * filters.  This means if multiple flows hash to the same
   1031		 * flow_id, all but the most recently touched will be eligible
   1032		 * for expiry.
   1033		 */
   1034		rc = 0;
   1035	}
   1036
   1037	/* Queue the request */
   1038	dev_hold(req->net_dev = net_dev);
   1039	INIT_WORK(&req->work, efx_filter_rfs_work);
   1040	req->rxq_index = rxq_index;
   1041	req->flow_id = flow_id;
   1042	schedule_work(&req->work);
   1043	return rc;
   1044out_unlock:
   1045	spin_unlock(&efx->rps_hash_lock);
   1046out_clear:
   1047	clear_bit(slot_idx, &efx->rps_slot_map);
   1048	return rc;
   1049}
   1050
   1051bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
   1052				   unsigned int quota)
   1053{
   1054	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
   1055	struct efx_nic *efx = channel->efx;
   1056	unsigned int index, size, start;
   1057	u32 flow_id;
   1058
   1059	if (!mutex_trylock(&efx->rps_mutex))
   1060		return false;
   1061	expire_one = efx->type->filter_rfs_expire_one;
   1062	index = channel->rfs_expire_index;
   1063	start = index;
   1064	size = efx->type->max_rx_ip_filters;
   1065	while (quota) {
   1066		flow_id = channel->rps_flow_id[index];
   1067
   1068		if (flow_id != RPS_FLOW_ID_INVALID) {
   1069			quota--;
   1070			if (expire_one(efx, flow_id, index)) {
   1071				netif_info(efx, rx_status, efx->net_dev,
   1072					   "expired filter %d [channel %u flow %u]\n",
   1073					   index, channel->channel, flow_id);
   1074				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
   1075				channel->rfs_filter_count--;
   1076			}
   1077		}
   1078		if (++index == size)
   1079			index = 0;
   1080		/* If we were called with a quota that exceeds the total number
   1081		 * of filters in the table (which shouldn't happen, but could
   1082		 * if two callers race), ensure that we don't loop forever -
   1083		 * stop when we've examined every row of the table.
   1084		 */
   1085		if (index == start)
   1086			break;
   1087	}
   1088
   1089	channel->rfs_expire_index = index;
   1090	mutex_unlock(&efx->rps_mutex);
   1091	return true;
   1092}
   1093
   1094#endif /* CONFIG_RFS_ACCEL */
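__efx_siena_filter_rfs_expire scans the per-channel flow-id table as a ring: it resumes at rfs_expire_index, spends quota only on slots holding a live flow, and stops after one full lap even if quota remains. A standalone sketch of that scan pattern with an invented eight-entry table (the driver's invalid marker is RPS_FLOW_ID_INVALID; 0 stands in for it here):

#include <stdio.h>

#define INVALID 0u	/* stand-in for RPS_FLOW_ID_INVALID */

int main(void)
{
	unsigned int flow_id[8] = { 0, 7, 0, 0, 9, 0, 3, 0 };
	unsigned int size = 8, start = 5, index = start, quota = 2;

	while (quota) {
		if (flow_id[index] != INVALID) {
			quota--;	/* quota is only spent on live entries */
			printf("examining filter %u (flow %u)\n",
			       index, flow_id[index]);
		}
		if (++index == size)
			index = 0;	/* wrap around the table */
		if (index == start)
			break;		/* one full lap: stop even with quota left */
	}
	printf("next scan resumes at index %u\n", index);
	return 0;
}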