cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

page_pool.c (24232B)


      1/* SPDX-License-Identifier: GPL-2.0
      2 *
      3 * page_pool.c
      4 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
      5 *	Copyright (C) 2016 Red Hat, Inc.
      6 */
      7
      8#include <linux/types.h>
      9#include <linux/kernel.h>
     10#include <linux/slab.h>
     11#include <linux/device.h>
     12
     13#include <net/page_pool.h>
     14#include <net/xdp.h>
     15
     16#include <linux/dma-direction.h>
     17#include <linux/dma-mapping.h>
     18#include <linux/page-flags.h>
     19#include <linux/mm.h> /* for __put_page() */
     20#include <linux/poison.h>
     21#include <linux/ethtool.h>
     22
     23#include <trace/events/page_pool.h>
     24
     25#define DEFER_TIME (msecs_to_jiffies(1000))
     26#define DEFER_WARN_INTERVAL (60 * HZ)
     27
     28#define BIAS_MAX	LONG_MAX
     29
     30#ifdef CONFIG_PAGE_POOL_STATS
     31/* alloc_stat_inc is intended to be used in softirq context */
     32#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
     33/* recycle_stat_inc is safe to use when preemption is possible. */
     34#define recycle_stat_inc(pool, __stat)							\
     35	do {										\
     36		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
     37		this_cpu_inc(s->__stat);						\
     38	} while (0)
     39
     40#define recycle_stat_add(pool, __stat, val)						\
     41	do {										\
     42		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats;	\
     43		this_cpu_add(s->__stat, val);						\
     44	} while (0)
     45
     46static const char pp_stats[][ETH_GSTRING_LEN] = {
     47	"rx_pp_alloc_fast",
     48	"rx_pp_alloc_slow",
     49	"rx_pp_alloc_slow_ho",
     50	"rx_pp_alloc_empty",
     51	"rx_pp_alloc_refill",
     52	"rx_pp_alloc_waive",
     53	"rx_pp_recycle_cached",
     54	"rx_pp_recycle_cache_full",
     55	"rx_pp_recycle_ring",
     56	"rx_pp_recycle_ring_full",
     57	"rx_pp_recycle_released_ref",
     58};
     59
     60bool page_pool_get_stats(struct page_pool *pool,
     61			 struct page_pool_stats *stats)
     62{
     63	int cpu = 0;
     64
     65	if (!stats)
     66		return false;
     67
      68	/* The caller is responsible for initializing the stats. */
     69	stats->alloc_stats.fast += pool->alloc_stats.fast;
     70	stats->alloc_stats.slow += pool->alloc_stats.slow;
     71	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
     72	stats->alloc_stats.empty += pool->alloc_stats.empty;
     73	stats->alloc_stats.refill += pool->alloc_stats.refill;
     74	stats->alloc_stats.waive += pool->alloc_stats.waive;
     75
     76	for_each_possible_cpu(cpu) {
     77		const struct page_pool_recycle_stats *pcpu =
     78			per_cpu_ptr(pool->recycle_stats, cpu);
     79
     80		stats->recycle_stats.cached += pcpu->cached;
     81		stats->recycle_stats.cache_full += pcpu->cache_full;
     82		stats->recycle_stats.ring += pcpu->ring;
     83		stats->recycle_stats.ring_full += pcpu->ring_full;
     84		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
     85	}
     86
     87	return true;
     88}
     89EXPORT_SYMBOL(page_pool_get_stats);
     90
     91u8 *page_pool_ethtool_stats_get_strings(u8 *data)
     92{
     93	int i;
     94
     95	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
     96		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
     97		data += ETH_GSTRING_LEN;
     98	}
     99
    100	return data;
    101}
    102EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);
    103
    104int page_pool_ethtool_stats_get_count(void)
    105{
    106	return ARRAY_SIZE(pp_stats);
    107}
    108EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
    109
    110u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
    111{
    112	struct page_pool_stats *pool_stats = stats;
    113
    114	*data++ = pool_stats->alloc_stats.fast;
    115	*data++ = pool_stats->alloc_stats.slow;
    116	*data++ = pool_stats->alloc_stats.slow_high_order;
    117	*data++ = pool_stats->alloc_stats.empty;
    118	*data++ = pool_stats->alloc_stats.refill;
    119	*data++ = pool_stats->alloc_stats.waive;
    120	*data++ = pool_stats->recycle_stats.cached;
    121	*data++ = pool_stats->recycle_stats.cache_full;
    122	*data++ = pool_stats->recycle_stats.ring;
    123	*data++ = pool_stats->recycle_stats.ring_full;
    124	*data++ = pool_stats->recycle_stats.released_refcnt;
    125
    126	return data;
    127}
    128EXPORT_SYMBOL(page_pool_ethtool_stats_get);
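/* A minimal sketch of how a driver could wire the three helpers above into
 * its ethtool_ops; the foo_* callbacks, foo_num_rx_queues() and foo_rx_pool()
 * below are illustrative assumptions, not part of this file:
 *
 *	static int foo_get_sset_count(struct net_device *dev, int sset)
 *	{
 *		return sset == ETH_SS_STATS ?
 *		       page_pool_ethtool_stats_get_count() : -EOPNOTSUPP;
 *	}
 *
 *	static void foo_get_strings(struct net_device *dev, u32 sset, u8 *data)
 *	{
 *		if (sset == ETH_SS_STATS)
 *			data = page_pool_ethtool_stats_get_strings(data);
 *	}
 *
 *	static void foo_get_ethtool_stats(struct net_device *dev,
 *					  struct ethtool_stats *st, u64 *data)
 *	{
 *		struct page_pool_stats stats = {};	// caller zero-initializes
 *		int i;
 *
 *		for (i = 0; i < foo_num_rx_queues(dev); i++)
 *			page_pool_get_stats(foo_rx_pool(dev, i), &stats);
 *		data = page_pool_ethtool_stats_get(data, &stats);
 *	}
 */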
    129
    130#else
    131#define alloc_stat_inc(pool, __stat)
    132#define recycle_stat_inc(pool, __stat)
    133#define recycle_stat_add(pool, __stat, val)
    134#endif
    135
    136static int page_pool_init(struct page_pool *pool,
    137			  const struct page_pool_params *params)
    138{
    139	unsigned int ring_qsize = 1024; /* Default */
    140
    141	memcpy(&pool->p, params, sizeof(pool->p));
    142
    143	/* Validate only known flags were used */
    144	if (pool->p.flags & ~(PP_FLAG_ALL))
    145		return -EINVAL;
    146
    147	if (pool->p.pool_size)
    148		ring_qsize = pool->p.pool_size;
    149
    150	/* Sanity limit mem that can be pinned down */
    151	if (ring_qsize > 32768)
    152		return -E2BIG;
    153
    154	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
     155	 * DMA_BIDIRECTIONAL also allows the page to be used for DMA transmit,
    156	 * which is the XDP_TX use-case.
    157	 */
    158	if (pool->p.flags & PP_FLAG_DMA_MAP) {
    159		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
    160		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
    161			return -EINVAL;
    162	}
    163
    164	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
    165		/* In order to request DMA-sync-for-device the page
    166		 * needs to be mapped
    167		 */
    168		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
    169			return -EINVAL;
    170
    171		if (!pool->p.max_len)
    172			return -EINVAL;
    173
    174		/* pool->p.offset has to be set according to the address
    175		 * offset used by the DMA engine to start copying rx data
    176		 */
    177	}
    178
    179	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
    180	    pool->p.flags & PP_FLAG_PAGE_FRAG)
    181		return -EINVAL;
    182
    183#ifdef CONFIG_PAGE_POOL_STATS
    184	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
    185	if (!pool->recycle_stats)
    186		return -ENOMEM;
    187#endif
    188
    189	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
    190		return -ENOMEM;
    191
    192	atomic_set(&pool->pages_state_release_cnt, 0);
    193
     194	/* A driver calling page_pool_create() must also call page_pool_destroy() */
    195	refcount_set(&pool->user_cnt, 1);
    196
    197	if (pool->p.flags & PP_FLAG_DMA_MAP)
    198		get_device(pool->p.dev);
    199
    200	return 0;
    201}
    202
    203struct page_pool *page_pool_create(const struct page_pool_params *params)
    204{
    205	struct page_pool *pool;
    206	int err;
    207
    208	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
    209	if (!pool)
    210		return ERR_PTR(-ENOMEM);
    211
    212	err = page_pool_init(pool, params);
    213	if (err < 0) {
    214		pr_warn("%s() gave up with errno %d\n", __func__, err);
    215		kfree(pool);
    216		return ERR_PTR(err);
    217	}
    218
    219	return pool;
    220}
    221EXPORT_SYMBOL(page_pool_create);
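/* A minimal sketch of typical driver setup; the field values and the pdev
 * name are illustrative assumptions. page_pool_create() returns an
 * ERR_PTR-encoded error on failure, so check it with IS_ERR():
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 1024,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */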
    222
    223static void page_pool_return_page(struct page_pool *pool, struct page *page);
    224
    225noinline
    226static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
    227{
    228	struct ptr_ring *r = &pool->ring;
    229	struct page *page;
    230	int pref_nid; /* preferred NUMA node */
    231
    232	/* Quicker fallback, avoid locks when ring is empty */
    233	if (__ptr_ring_empty(r)) {
    234		alloc_stat_inc(pool, empty);
    235		return NULL;
    236	}
    237
     238	/* Softirq guarantees the CPU and thus the NUMA node are stable. This
     239	 * assumes the CPU refilling the driver RX-ring also runs RX-NAPI.
    240	 */
    241#ifdef CONFIG_NUMA
    242	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
    243#else
    244	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
    245	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
    246#endif
    247
    248	/* Refill alloc array, but only if NUMA match */
    249	do {
    250		page = __ptr_ring_consume(r);
    251		if (unlikely(!page))
    252			break;
    253
    254		if (likely(page_to_nid(page) == pref_nid)) {
    255			pool->alloc.cache[pool->alloc.count++] = page;
    256		} else {
    257			/* NUMA mismatch;
    258			 * (1) release 1 page to page-allocator and
     259			 * (2) break out and fall through to alloc_pages_node().
     260			 * This limits stress on the page buddy allocator.
    261			 */
    262			page_pool_return_page(pool, page);
    263			alloc_stat_inc(pool, waive);
    264			page = NULL;
    265			break;
    266		}
    267	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
    268
    269	/* Return last page */
    270	if (likely(pool->alloc.count > 0)) {
    271		page = pool->alloc.cache[--pool->alloc.count];
    272		alloc_stat_inc(pool, refill);
    273	}
    274
    275	return page;
    276}
    277
    278/* fast path */
    279static struct page *__page_pool_get_cached(struct page_pool *pool)
    280{
    281	struct page *page;
    282
    283	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
    284	if (likely(pool->alloc.count)) {
    285		/* Fast-path */
    286		page = pool->alloc.cache[--pool->alloc.count];
    287		alloc_stat_inc(pool, fast);
    288	} else {
    289		page = page_pool_refill_alloc_cache(pool);
    290	}
    291
    292	return page;
    293}
    294
    295static void page_pool_dma_sync_for_device(struct page_pool *pool,
    296					  struct page *page,
    297					  unsigned int dma_sync_size)
    298{
    299	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
    300
    301	dma_sync_size = min(dma_sync_size, pool->p.max_len);
    302	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
    303					 pool->p.offset, dma_sync_size,
    304					 pool->p.dma_dir);
    305}
    306
    307static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
    308{
    309	dma_addr_t dma;
    310
    311	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
    312	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
     313	 * into page private data (i.e. a 32-bit CPU with 64-bit DMA capabilities).
     314	 * This mapping is kept for the lifetime of the page, until it leaves the pool.
    315	 */
    316	dma = dma_map_page_attrs(pool->p.dev, page, 0,
    317				 (PAGE_SIZE << pool->p.order),
    318				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
    319	if (dma_mapping_error(pool->p.dev, dma))
    320		return false;
    321
    322	page_pool_set_dma_addr(page, dma);
    323
    324	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
    325		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
    326
    327	return true;
    328}
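/* The mapping stashed above stays valid until the page leaves the pool, so a
 * driver that set PP_FLAG_DMA_MAP can read it back when posting RX
 * descriptors; rx_desc and the headroom choice are illustrative assumptions:
 *
 *	dma_addr_t dma = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
 *
 *	rx_desc->addr = cpu_to_le64(dma);
 */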
    329
    330static void page_pool_set_pp_info(struct page_pool *pool,
    331				  struct page *page)
    332{
    333	page->pp = pool;
    334	page->pp_magic |= PP_SIGNATURE;
    335	if (pool->p.init_callback)
    336		pool->p.init_callback(page, pool->p.init_arg);
    337}
    338
    339static void page_pool_clear_pp_info(struct page *page)
    340{
    341	page->pp_magic = 0;
    342	page->pp = NULL;
    343}
    344
    345static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
    346						 gfp_t gfp)
    347{
    348	struct page *page;
    349
    350	gfp |= __GFP_COMP;
    351	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
    352	if (unlikely(!page))
    353		return NULL;
    354
    355	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
    356	    unlikely(!page_pool_dma_map(pool, page))) {
    357		put_page(page);
    358		return NULL;
    359	}
    360
    361	alloc_stat_inc(pool, slow_high_order);
    362	page_pool_set_pp_info(pool, page);
    363
    364	/* Track how many pages are held 'in-flight' */
    365	pool->pages_state_hold_cnt++;
    366	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
    367	return page;
    368}
    369
    370/* slow path */
    371noinline
    372static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
    373						 gfp_t gfp)
    374{
    375	const int bulk = PP_ALLOC_CACHE_REFILL;
    376	unsigned int pp_flags = pool->p.flags;
    377	unsigned int pp_order = pool->p.order;
    378	struct page *page;
    379	int i, nr_pages;
    380
    381	/* Don't support bulk alloc for high-order pages */
    382	if (unlikely(pp_order))
    383		return __page_pool_alloc_page_order(pool, gfp);
    384
    385	/* Unnecessary as alloc cache is empty, but guarantees zero count */
    386	if (unlikely(pool->alloc.count > 0))
    387		return pool->alloc.cache[--pool->alloc.count];
    388
    389	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
    390	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
    391
    392	nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache);
    393	if (unlikely(!nr_pages))
    394		return NULL;
    395
     396	/* Pages have been filled into the alloc.cache array, but the count is
     397	 * zero and the page elements have not yet been (possibly) DMA mapped.
    398	 */
    399	for (i = 0; i < nr_pages; i++) {
    400		page = pool->alloc.cache[i];
    401		if ((pp_flags & PP_FLAG_DMA_MAP) &&
    402		    unlikely(!page_pool_dma_map(pool, page))) {
    403			put_page(page);
    404			continue;
    405		}
    406
    407		page_pool_set_pp_info(pool, page);
    408		pool->alloc.cache[pool->alloc.count++] = page;
    409		/* Track how many pages are held 'in-flight' */
    410		pool->pages_state_hold_cnt++;
    411		trace_page_pool_state_hold(pool, page,
    412					   pool->pages_state_hold_cnt);
    413	}
    414
    415	/* Return last page */
    416	if (likely(pool->alloc.count > 0)) {
    417		page = pool->alloc.cache[--pool->alloc.count];
    418		alloc_stat_inc(pool, slow);
    419	} else {
    420		page = NULL;
    421	}
    422
     423	/* A page that was just allocated should/must have refcnt == 1. */
    424	return page;
    425}
    426
     427/* Use page_pool to replace alloc_pages() API calls, but provide a
     428 * synchronization guarantee for the allocation side.
    429 */
    430struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
    431{
    432	struct page *page;
    433
    434	/* Fast-path: Get a page from cache */
    435	page = __page_pool_get_cached(pool);
    436	if (page)
    437		return page;
    438
    439	/* Slow-path: cache empty, do real allocation */
    440	page = __page_pool_alloc_pages_slow(pool, gfp);
    441	return page;
    442}
    443EXPORT_SYMBOL(page_pool_alloc_pages);
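/* A sketch of the intended RX-refill usage from NAPI/softirq context, where
 * the lockless alloc cache above is safe to use; rxq, idx, rx_desc and
 * headroom are illustrative assumptions:
 *
 *	page = page_pool_alloc_pages(pool, GFP_ATOMIC | __GFP_NOWARN);
 *	if (unlikely(!page))
 *		break;
 *	rxq->pages[idx] = page;
 *	rx_desc->addr = cpu_to_le64(page_pool_get_dma_addr(page) + headroom);
 */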
    444
    445/* Calculate distance between two u32 values, valid if distance is below 2^(31)
    446 *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
    447 */
    448#define _distance(a, b)	(s32)((a) - (b))
    449
    450static s32 page_pool_inflight(struct page_pool *pool)
    451{
    452	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
    453	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
    454	s32 inflight;
    455
    456	inflight = _distance(hold_cnt, release_cnt);
    457
    458	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
    459	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);
    460
    461	return inflight;
    462}
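/* Both counters are free-running u32 values, so the _distance() subtraction
 * above stays correct across wrap-around as long as fewer than 2^31 pages
 * are in flight. Worked example: hold_cnt has wrapped around to 5 while
 * release_cnt is 4294967290; (s32)(5 - 4294967290) == 11, i.e. 11 pages are
 * still outstanding.
 */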
    463
    464/* Disconnects a page (from a page_pool).  API users can have a need
    465 * to disconnect a page (from a page_pool), to allow it to be used as
    466 * a regular page (that will eventually be returned to the normal
    467 * page-allocator via put_page).
    468 */
    469void page_pool_release_page(struct page_pool *pool, struct page *page)
    470{
    471	dma_addr_t dma;
    472	int count;
    473
    474	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
    475		/* Always account for inflight pages, even if we didn't
    476		 * map them
    477		 */
    478		goto skip_dma_unmap;
    479
    480	dma = page_pool_get_dma_addr(page);
    481
    482	/* When page is unmapped, it cannot be returned to our pool */
    483	dma_unmap_page_attrs(pool->p.dev, dma,
    484			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
    485			     DMA_ATTR_SKIP_CPU_SYNC);
    486	page_pool_set_dma_addr(page, 0);
    487skip_dma_unmap:
    488	page_pool_clear_pp_info(page);
    489
    490	/* This may be the last page returned, releasing the pool, so
    491	 * it is not safe to reference pool afterwards.
    492	 */
    493	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
    494	trace_page_pool_state_release(pool, page, count);
    495}
    496EXPORT_SYMBOL(page_pool_release_page);
    497
    498/* Return a page to the page allocator, cleaning up our state */
    499static void page_pool_return_page(struct page_pool *pool, struct page *page)
    500{
    501	page_pool_release_page(pool, page);
    502
    503	put_page(page);
    504	/* An optimization would be to call __free_pages(page, pool->p.order)
    505	 * knowing page is not part of page-cache (thus avoiding a
    506	 * __page_cache_release() call).
    507	 */
    508}
    509
    510static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
    511{
    512	int ret;
    513	/* BH protection not needed if current is serving softirq */
    514	if (in_serving_softirq())
    515		ret = ptr_ring_produce(&pool->ring, page);
    516	else
    517		ret = ptr_ring_produce_bh(&pool->ring, page);
    518
    519	if (!ret) {
    520		recycle_stat_inc(pool, ring);
    521		return true;
    522	}
    523
    524	return false;
    525}
    526
    527/* Only allow direct recycling in special circumstances, into the
    528 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
    529 *
    530 * Caller must provide appropriate safe context.
    531 */
    532static bool page_pool_recycle_in_cache(struct page *page,
    533				       struct page_pool *pool)
    534{
    535	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
    536		recycle_stat_inc(pool, cache_full);
    537		return false;
    538	}
    539
    540	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
    541	pool->alloc.cache[pool->alloc.count++] = page;
    542	recycle_stat_inc(pool, cached);
    543	return true;
    544}
    545
    546/* If the page refcnt == 1, this will try to recycle the page.
     547 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
     548 * the configured size min(dma_sync_size, pool->max_len).
     549 * If the page refcnt != 1, then the page will be returned to the memory
     550 * subsystem.
    551 */
    552static __always_inline struct page *
    553__page_pool_put_page(struct page_pool *pool, struct page *page,
    554		     unsigned int dma_sync_size, bool allow_direct)
    555{
    556	/* This allocator is optimized for the XDP mode that uses
     557	 * one-frame-per-page, but has fallbacks that act like the
    558	 * regular page allocator APIs.
    559	 *
    560	 * refcnt == 1 means page_pool owns page, and can recycle it.
    561	 *
     562	 * A page is NOT reusable when it was allocated while the system was
     563	 * under memory pressure (page_is_pfmemalloc()).
    564	 */
    565	if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
    566		/* Read barrier done in page_ref_count / READ_ONCE */
    567
    568		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
    569			page_pool_dma_sync_for_device(pool, page,
    570						      dma_sync_size);
    571
    572		if (allow_direct && in_serving_softirq() &&
    573		    page_pool_recycle_in_cache(page, pool))
    574			return NULL;
    575
    576		/* Page found as candidate for recycling */
    577		return page;
    578	}
     579	/* Fallback/non-XDP mode: the API user has an elevated refcnt.
    580	 *
    581	 * Many drivers split up the page into fragments, and some
    582	 * want to keep doing this to save memory and do refcnt based
    583	 * recycling. Support this use case too, to ease drivers
    584	 * switching between XDP/non-XDP.
    585	 *
     586	 * In case page_pool maintains the DMA mapping, the API user must
     587	 * call page_pool_put_page() once.  In this elevated refcnt
     588	 * case, the DMA mapping is unmapped/released, as the driver is
     589	 * likely doing refcnt-based recycle tricks, meaning another
     590	 * process will be invoking put_page().
    591	 */
    592	recycle_stat_inc(pool, released_refcnt);
    593	/* Do not replace this with page_pool_return_page() */
    594	page_pool_release_page(pool, page);
    595	put_page(page);
    596
    597	return NULL;
    598}
    599
    600void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
    601				  unsigned int dma_sync_size, bool allow_direct)
    602{
    603	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
    604	if (page && !page_pool_recycle_in_ring(pool, page)) {
    605		/* Cache full, fallback to free pages */
    606		recycle_stat_inc(pool, ring_full);
    607		page_pool_return_page(pool, page);
    608	}
    609}
    610EXPORT_SYMBOL(page_pool_put_defragged_page);
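/* Drivers normally reach this through the inline wrappers in
 * <net/page_pool.h>: page_pool_put_page() first drops a fragment reference
 * and only calls in here for the last user, page_pool_put_full_page() passes
 * dma_sync_size == -1 (sync the full max_len area), and
 * page_pool_recycle_direct() additionally sets allow_direct for use inside
 * NAPI. A sketch of XDP_DROP handling (illustrative):
 *
 *	case XDP_DROP:
 *		page_pool_recycle_direct(pool, page);
 *		break;
 */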
    611
    612/* Caller must not use data area after call, as this function overwrites it */
    613void page_pool_put_page_bulk(struct page_pool *pool, void **data,
    614			     int count)
    615{
    616	int i, bulk_len = 0;
    617
    618	for (i = 0; i < count; i++) {
    619		struct page *page = virt_to_head_page(data[i]);
    620
    621		/* It is not the last user for the page frag case */
    622		if (!page_pool_is_last_frag(pool, page))
    623			continue;
    624
    625		page = __page_pool_put_page(pool, page, -1, false);
    626		/* Approved for bulk recycling in ptr_ring cache */
    627		if (page)
    628			data[bulk_len++] = page;
    629	}
    630
    631	if (unlikely(!bulk_len))
    632		return;
    633
    634	/* Bulk producer into ptr_ring page_pool cache */
    635	page_pool_ring_lock(pool);
    636	for (i = 0; i < bulk_len; i++) {
    637		if (__ptr_ring_produce(&pool->ring, data[i])) {
    638			/* ring full */
    639			recycle_stat_inc(pool, ring_full);
    640			break;
    641		}
    642	}
    643	recycle_stat_add(pool, ring, i);
    644	page_pool_ring_unlock(pool);
    645
     646	/* Hopefully all pages were returned into the ptr_ring */
    647	if (likely(i == bulk_len))
    648		return;
    649
    650	/* ptr_ring cache full, free remaining pages outside producer lock
    651	 * since put_page() with refcnt == 1 can be an expensive operation
    652	 */
    653	for (; i < bulk_len; i++)
    654		page_pool_return_page(pool, data[i]);
    655}
    656EXPORT_SYMBOL(page_pool_put_page_bulk);
    657
    658static struct page *page_pool_drain_frag(struct page_pool *pool,
    659					 struct page *page)
    660{
    661	long drain_count = BIAS_MAX - pool->frag_users;
    662
    663	/* Some user is still using the page frag */
    664	if (likely(page_pool_defrag_page(page, drain_count)))
    665		return NULL;
    666
    667	if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
    668		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
    669			page_pool_dma_sync_for_device(pool, page, -1);
    670
    671		return page;
    672	}
    673
    674	page_pool_return_page(pool, page);
    675	return NULL;
    676}
    677
    678static void page_pool_free_frag(struct page_pool *pool)
    679{
    680	long drain_count = BIAS_MAX - pool->frag_users;
    681	struct page *page = pool->frag_page;
    682
    683	pool->frag_page = NULL;
    684
    685	if (!page || page_pool_defrag_page(page, drain_count))
    686		return;
    687
    688	page_pool_return_page(pool, page);
    689}
    690
    691struct page *page_pool_alloc_frag(struct page_pool *pool,
    692				  unsigned int *offset,
    693				  unsigned int size, gfp_t gfp)
    694{
    695	unsigned int max_size = PAGE_SIZE << pool->p.order;
    696	struct page *page = pool->frag_page;
    697
    698	if (WARN_ON(!(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
    699		    size > max_size))
    700		return NULL;
    701
    702	size = ALIGN(size, dma_get_cache_alignment());
    703	*offset = pool->frag_offset;
    704
    705	if (page && *offset + size > max_size) {
    706		page = page_pool_drain_frag(pool, page);
    707		if (page) {
    708			alloc_stat_inc(pool, fast);
    709			goto frag_reset;
    710		}
    711	}
    712
    713	if (!page) {
    714		page = page_pool_alloc_pages(pool, gfp);
    715		if (unlikely(!page)) {
    716			pool->frag_page = NULL;
    717			return NULL;
    718		}
    719
    720		pool->frag_page = page;
    721
    722frag_reset:
    723		pool->frag_users = 1;
    724		*offset = 0;
    725		pool->frag_offset = size;
    726		page_pool_fragment_page(page, BIAS_MAX);
    727		return page;
    728	}
    729
    730	pool->frag_users++;
    731	pool->frag_offset = *offset + size;
    732	alloc_stat_inc(pool, fast);
    733	return page;
    734}
    735EXPORT_SYMBOL(page_pool_alloc_frag);
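/* A sketch of the frag API from a driver that packs several RX buffers into
 * one page; the 2048-byte buffer size and the buf bookkeeping are
 * illustrative assumptions:
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_alloc_frag(pool, &offset, 2048, GFP_ATOMIC);
 *	if (unlikely(!page))
 *		break;
 *	buf->page = page;
 *	buf->offset = offset;
 *
 * Each fragment is later released with page_pool_put_page(); the remaining
 * BIAS_MAX bias set by page_pool_fragment_page() is drained by
 * page_pool_drain_frag()/page_pool_free_frag() above.
 */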
    736
    737static void page_pool_empty_ring(struct page_pool *pool)
    738{
    739	struct page *page;
    740
    741	/* Empty recycle ring */
    742	while ((page = ptr_ring_consume_bh(&pool->ring))) {
    743		/* Verify the refcnt invariant of cached pages */
    744		if (!(page_ref_count(page) == 1))
    745			pr_crit("%s() page_pool refcnt %d violation\n",
    746				__func__, page_ref_count(page));
    747
    748		page_pool_return_page(pool, page);
    749	}
    750}
    751
    752static void page_pool_free(struct page_pool *pool)
    753{
    754	if (pool->disconnect)
    755		pool->disconnect(pool);
    756
    757	ptr_ring_cleanup(&pool->ring, NULL);
    758
    759	if (pool->p.flags & PP_FLAG_DMA_MAP)
    760		put_device(pool->p.dev);
    761
    762#ifdef CONFIG_PAGE_POOL_STATS
    763	free_percpu(pool->recycle_stats);
    764#endif
    765	kfree(pool);
    766}
    767
    768static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
    769{
    770	struct page *page;
    771
    772	if (pool->destroy_cnt)
    773		return;
    774
     775	/* Empty the alloc cache; assume the caller made sure it is
     776	 * no longer in use, and that page_pool_alloc_pages() cannot be
     777	 * called concurrently.
    778	 */
    779	while (pool->alloc.count) {
    780		page = pool->alloc.cache[--pool->alloc.count];
    781		page_pool_return_page(pool, page);
    782	}
    783}
    784
    785static void page_pool_scrub(struct page_pool *pool)
    786{
    787	page_pool_empty_alloc_cache_once(pool);
    788	pool->destroy_cnt++;
    789
    790	/* No more consumers should exist, but producers could still
    791	 * be in-flight.
    792	 */
    793	page_pool_empty_ring(pool);
    794}
    795
    796static int page_pool_release(struct page_pool *pool)
    797{
    798	int inflight;
    799
    800	page_pool_scrub(pool);
    801	inflight = page_pool_inflight(pool);
    802	if (!inflight)
    803		page_pool_free(pool);
    804
    805	return inflight;
    806}
    807
    808static void page_pool_release_retry(struct work_struct *wq)
    809{
    810	struct delayed_work *dwq = to_delayed_work(wq);
    811	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
    812	int inflight;
    813
    814	inflight = page_pool_release(pool);
    815	if (!inflight)
    816		return;
    817
    818	/* Periodic warning */
    819	if (time_after_eq(jiffies, pool->defer_warn)) {
    820		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
    821
    822		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
    823			__func__, inflight, sec);
    824		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
    825	}
    826
    827	/* Still not ready to be disconnected, retry later */
    828	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
    829}
    830
    831void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
    832			   struct xdp_mem_info *mem)
    833{
    834	refcount_inc(&pool->user_cnt);
    835	pool->disconnect = disconnect;
    836	pool->xdp_mem_id = mem->id;
    837}
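/* This is invoked via xdp_rxq_info_reg_mem_model() when a driver registers
 * the pool as an XDP memory model; the rxq variable below is an illustrative
 * assumption:
 *
 *	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, pool);
 */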
    838
    839void page_pool_destroy(struct page_pool *pool)
    840{
    841	if (!pool)
    842		return;
    843
    844	if (!page_pool_put(pool))
    845		return;
    846
    847	page_pool_free_frag(pool);
    848
    849	if (!page_pool_release(pool))
    850		return;
    851
    852	pool->defer_start = jiffies;
    853	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;
    854
    855	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
    856	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
    857}
    858EXPORT_SYMBOL(page_pool_destroy);
    859
    860/* Caller must provide appropriate safe context, e.g. NAPI. */
    861void page_pool_update_nid(struct page_pool *pool, int new_nid)
    862{
    863	struct page *page;
    864
    865	trace_page_pool_update_nid(pool, new_nid);
    866	pool->p.nid = new_nid;
    867
    868	/* Flush pool alloc cache, as refill will check NUMA node */
    869	while (pool->alloc.count) {
    870		page = pool->alloc.cache[--pool->alloc.count];
    871		page_pool_return_page(pool, page);
    872	}
    873}
    874EXPORT_SYMBOL(page_pool_update_nid);
    875
    876bool page_pool_return_skb_page(struct page *page)
    877{
    878	struct page_pool *pp;
    879
    880	page = compound_head(page);
    881
    882	/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
    883	 * in order to preserve any existing bits, such as bit 0 for the
     884	 * head page of a compound page and bit 1 for a pfmemalloc page, so
     885	 * mask those bits on the freeing side when doing the check below,
    886	 * and page_is_pfmemalloc() is checked in __page_pool_put_page()
    887	 * to avoid recycling the pfmemalloc page.
    888	 */
    889	if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
    890		return false;
    891
    892	pp = page->pp;
    893
     894	/* The driver set this to the memory recycling info. Reset it on recycle.
     895	 * This will *not* work for a NIC using a split-page memory model.
    896	 * The page will be returned to the pool here regardless of the
    897	 * 'flipped' fragment being in use or not.
    898	 */
    899	page_pool_put_full_page(pp, page, false);
    900
    901	return true;
    902}
    903EXPORT_SYMBOL(page_pool_return_skb_page);
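/* A sketch of the RX completion path that makes the above reachable; the
 * truesize value is an illustrative assumption. skb_mark_for_recycle() sets
 * skb->pp_recycle, and the skb free path then hands page_pool pages back
 * here instead of to put_page():
 *
 *	skb = napi_build_skb(page_address(page), truesize);
 *	if (!skb)
 *		goto drop;
 *	skb_mark_for_recycle(skb);
 */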