cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gve_utils.c (2611B)


// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"

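/* Detach the TX ring at @queue_idx from its notification block. */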
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];

	block->tx = NULL;
}

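/* Attach the TX ring at @queue_idx to its notification block and steer
 * transmit traffic for the queue (via XPS) toward the CPU serving that
 * block.  Only num_ntfy_blks / 2 blocks are used for TX, capped by the
 * number of online CPUs.
 */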
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
					 num_online_cpus());
	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_tx_ring *tx = &priv->tx[queue_idx];

	block->tx = tx;
	tx->ntfy_id = ntfy_idx;
	netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
			    queue_idx);
}

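/* Detach the RX ring at @queue_idx from its notification block. */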
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{
	struct gve_notify_block *block =
			&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];

	block->rx = NULL;
}

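/* Attach the RX ring at @queue_idx to its notification block and record
 * the block index in the ring.
 */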
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
{
	u32 ntfy_idx = gve_rx_idx_to_ntfy(priv, queue_idx);
	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
	struct gve_rx_ring *rx = &priv->rx[queue_idx];

	block->rx = rx;
	rx->ntfy_id = ntfy_idx;
}

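/* Copy @len bytes of a received fragment into a linear skb.  With a
 * multi-fragment context (@ctx), fragments are appended to ctx->skb_head,
 * which is allocated on the first fragment and sized for the whole packet;
 * the protocol is resolved via eth_type_trans() only once the final
 * fragment has been copied.  Without a context, a fresh skb is allocated
 * per call.  Returns NULL if skb allocation fails.
 */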
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
			    struct gve_rx_slot_page_info *page_info, u16 len,
			    u16 padding, struct gve_rx_ctx *ctx)
{
	void *va = page_info->page_address + padding + page_info->page_offset;
	int skb_linear_offset = 0;
	bool set_protocol = false;
	struct sk_buff *skb;

	if (ctx) {
		if (!ctx->skb_head)
			ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);

		if (unlikely(!ctx->skb_head))
			return NULL;
		skb = ctx->skb_head;
		skb_linear_offset = skb->len;
		set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
	} else {
		skb = napi_alloc_skb(napi, len);

		if (unlikely(!skb))
			return NULL;
		set_protocol = true;
	}
	__skb_put(skb, len);
	skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);

	if (set_protocol)
		skb->protocol = eth_type_trans(skb, dev);

	return skb;
}

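/* Consume one reference from the local pagecnt bias.  The bias stands in
 * for real page references so the hot path avoids atomic refcount updates;
 * once it is exhausted, both the bias and the page refcount are topped
 * back up toward INT_MAX with a single page_ref_add().
 */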
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info)
{
	page_info->pagecnt_bias--;
	if (page_info->pagecnt_bias == 0) {
		int pagecount = page_count(page_info->page);

		/* If we have run out of bias - set it back up to INT_MAX
		 * minus the existing refs.
		 */
		page_info->pagecnt_bias = INT_MAX - pagecount;

		/* Set pagecount back up to max. */
		page_ref_add(page_info->page, INT_MAX - pagecount);
	}
}