cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

xdp.c (17320B)


// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;
	xdp_rxq->frag_size = frag_size;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
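
/* Example (a hedged sketch, not part of this file): a typical driver
 * walks the registration states above as follows. The rq/dev names are
 * hypothetical; xdp_rxq_info_reg() is the frag_size==0 wrapper around
 * __xdp_rxq_info_reg() provided by <net/xdp.h>.
 *
 *	err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, rq->queue_index,
 *			       rq->napi.napi_id);
 *	if (err)
 *		return err;
 *
 *	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, rq->page_pool);
 *	if (err) {
 *		xdp_rxq_info_unreg(&rq->xdp_rxq);
 *		return err;
 *	}
 *
 *	... queue runs with reg_state == REG_STATE_REGISTERED ...
 *
 *	xdp_rxq_info_unreg(&rq->xdp_rxq);	(on teardown)
 */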

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}
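
/* Worked example of the cyclic behavior (illustrative numbers): if
 * mem_id_next is 100 and IDs 100..MEM_ID_MAX are all taken,
 * ida_simple_get() returns -ENOSPC, the single retry resets
 * mem_id_next to MEM_ID_MIN, and the lowest free ID from the bottom
 * of the range is handed out instead. Only a second -ENOSPC, meaning
 * all of [MEM_ID_MIN, MEM_ID_MAX) is in use, reaches the caller.
 */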

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ERR_PTR(ret);
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection.  The @napi_direct boolean
 * is used for those call sites.  Thus, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		page_pool_put_full_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, true, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
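
/* Example (a hedged sketch): picking the right return helper. From the
 * RX NAPI poll loop that owns the page_pool, the direct recycle cache
 * may be used:
 *
 *	xdp_return_frame_rx_napi(xdpf);
 *
 * From any other context (e.g. a TX completion path on another CPU),
 * use the non-direct variant:
 *
 *	xdp_return_frame(xdpf);
 */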

/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		xdp_return_frame(xdpf);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		struct skb_shared_info *sinfo;
		int i;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			bq->q[bq->count++] = skb_frag_address(frag);
			if (bq->count == XDP_BULK_QUEUE_SIZE)
				xdp_flush_frame_bulk(bq);
		}
	}
	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
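
/* Example (a hedged sketch; the loop shape and helper name are
 * illustrative): bulk-returning frames from a TX completion handler.
 * The bulk queue lives on the stack, xdp_frame_bulk_init() comes from
 * <net/xdp.h>, and rcu_read_lock() covers the mem.id lookups above:
 *
 *	struct xdp_frame_bulk bq;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock();
 *	while ((xdpf = fetch_completed_frame()))	(hypothetical helper)
 *		xdp_return_frame_bulk(xdpf, &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */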

void xdp_return_buff(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
	}
out:
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);
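
/* Example: XDP_WARN (defined in <net/xdp.h>) passes __func__ and
 * __LINE__ into xdp_warn() above, keeping WARN() out of the fast path.
 * Usage as in xdp_update_frame_from_buff():
 *
 *	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
 *		XDP_WARN("Driver BUG: missing reserved tailroom");
 *		return -ENOSPC;
 *	}
 */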

int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
				      n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int headroom, frame_size;
	void *hard_start;
	u8 nr_frags;

	/* xdp frags frame */
	if (unlikely(xdp_frame_has_frags(xdpf)))
		nr_frags = sinfo->nr_frags;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	if (unlikely(xdp_frame_has_frags(xdpf)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdpf->frame_sz,
					   xdp_frame_is_frag_pfmemalloc(xdpf));

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info		(skb->ip_summed)
	 * - HW RX hash			(skb_set_hash)
	 * - RX ring dev queue index	(skb_record_rx_queue)
	 */

	/* Until page_pool gets an SKB return path, release DMA here */
	xdp_release_frame(xdpf);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);
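
/* Example (a hedged sketch; the napi/dev variables are the caller's):
 * delivering a redirected frame to the stack as an skb, returning the
 * frame on allocation failure:
 *
 *	skb = xdp_build_skb_from_frame(xdpf, dev);
 *	if (!skb) {
 *		xdp_return_frame(xdpf);
 *		return;
 *	}
 *	napi_gro_receive(napi, skb);
 */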

struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}