cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

page_pool.h (12048B)


      1/* SPDX-License-Identifier: GPL-2.0
      2 *
      3 * page_pool.h
      4 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
      5 *	Copyright (C) 2016 Red Hat, Inc.
      6 */
      7
      8/**
      9 * DOC: page_pool allocator
     10 *
     11 * This page_pool allocator is optimized for the XDP mode that
     12 * uses one-frame-per-page, but have fallbacks that act like the
     13 * regular page allocator APIs.
     14 *
     15 * Basic use involve replacing alloc_pages() calls with the
     16 * page_pool_alloc_pages() call.  Drivers should likely use
     17 * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
     18 *
     19 * API keeps track of in-flight pages, in-order to let API user know
     20 * when it is safe to dealloactor page_pool object.  Thus, API users
     21 * must make sure to call page_pool_release_page() when a page is
     22 * "leaving" the page_pool.  Or call page_pool_put_page() where
     23 * appropiate.  For maintaining correct accounting.
     24 *
     25 * API user must only call page_pool_put_page() once on a page, as it
     26 * will either recycle the page, or in case of elevated refcnt, it
     27 * will release the DMA mapping and in-flight state accounting.  We
     28 * hope to lift this requirement in the future.
     29 */
     30#ifndef _NET_PAGE_POOL_H
     31#define _NET_PAGE_POOL_H
     32
     33#include <linux/mm.h> /* Needed by ptr_ring */
     34#include <linux/ptr_ring.h>
     35#include <linux/dma-direction.h>
     36
     37#define PP_FLAG_DMA_MAP		BIT(0) /* Should page_pool do the DMA
     38					* map/unmap
     39					*/
     40#define PP_FLAG_DMA_SYNC_DEV	BIT(1) /* If set all pages that the driver gets
     41					* from page_pool will be
     42					* DMA-synced-for-device according to
     43					* the length provided by the device
     44					* driver.
     45					* Please note DMA-sync-for-CPU is still
     46					* device driver responsibility
     47					*/
     48#define PP_FLAG_PAGE_FRAG	BIT(2) /* for page frag feature */
     49#define PP_FLAG_ALL		(PP_FLAG_DMA_MAP |\
     50				 PP_FLAG_DMA_SYNC_DEV |\
     51				 PP_FLAG_PAGE_FRAG)
     52
     53/*
     54 * Fast allocation side cache array/stack
     55 *
     56 * The cache size and refill watermark is related to the network
     57 * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
     58 * ring is usually refilled and the max consumed elements will be 64,
     59 * thus a natural max size of objects needed in the cache.
     60 *
     61 * Keeping room for more objects, is due to XDP_DROP use-case.  As
     62 * XDP_DROP allows the opportunity to recycle objects directly into
     63 * this array, as it shares the same softirq/NAPI protection.  If
     64 * cache is already full (or partly full) then the XDP_DROP recycles
     65 * would have to take a slower code path.
     66 */
     67#define PP_ALLOC_CACHE_SIZE	128
     68#define PP_ALLOC_CACHE_REFILL	64
     69struct pp_alloc_cache {
     70	u32 count;
     71	struct page *cache[PP_ALLOC_CACHE_SIZE];
     72};
     73
     74struct page_pool_params {
     75	unsigned int	flags;
     76	unsigned int	order;
     77	unsigned int	pool_size;
     78	int		nid;  /* Numa node id to allocate from pages from */
     79	struct device	*dev; /* device, for DMA pre-mapping purposes */
     80	enum dma_data_direction dma_dir; /* DMA mapping direction */
     81	unsigned int	max_len; /* max DMA sync memory size */
     82	unsigned int	offset;  /* DMA addr offset */
     83	void (*init_callback)(struct page *page, void *arg);
     84	void *init_arg;
     85};
     86
     87#ifdef CONFIG_PAGE_POOL_STATS
     88struct page_pool_alloc_stats {
     89	u64 fast; /* fast path allocations */
     90	u64 slow; /* slow-path order 0 allocations */
     91	u64 slow_high_order; /* slow-path high order allocations */
     92	u64 empty; /* failed refills due to empty ptr ring, forcing
     93		    * slow path allocation
     94		    */
     95	u64 refill; /* allocations via successful refill */
     96	u64 waive;  /* failed refills due to numa zone mismatch */
     97};
     98
     99struct page_pool_recycle_stats {
    100	u64 cached;	/* recycling placed page in the cache. */
    101	u64 cache_full; /* cache was full */
    102	u64 ring;	/* recycling placed page back into ptr ring */
    103	u64 ring_full;	/* page was released from page-pool because
    104			 * PTR ring was full.
    105			 */
    106	u64 released_refcnt; /* page released because of elevated
    107			      * refcnt
    108			      */
    109};
    110
    111/* This struct wraps the above stats structs so users of the
    112 * page_pool_get_stats API can pass a single argument when requesting the
    113 * stats for the page pool.
    114 */
    115struct page_pool_stats {
    116	struct page_pool_alloc_stats alloc_stats;
    117	struct page_pool_recycle_stats recycle_stats;
    118};
    119
    120int page_pool_ethtool_stats_get_count(void);
    121u8 *page_pool_ethtool_stats_get_strings(u8 *data);
    122u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
    123
    124/*
    125 * Drivers that wish to harvest page pool stats and report them to users
    126 * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
    127 * struct page_pool_stats call page_pool_get_stats to get stats for the specified pool.
    128 */
    129bool page_pool_get_stats(struct page_pool *pool,
    130			 struct page_pool_stats *stats);
    131#else
    132
    133static inline int page_pool_ethtool_stats_get_count(void)
    134{
    135	return 0;
    136}
    137
    138static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
    139{
    140	return data;
    141}
    142
    143static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
    144{
    145	return data;
    146}
    147
    148#endif
    149
    150struct page_pool {
    151	struct page_pool_params p;
    152
    153	struct delayed_work release_dw;
    154	void (*disconnect)(void *);
    155	unsigned long defer_start;
    156	unsigned long defer_warn;
    157
    158	u32 pages_state_hold_cnt;
    159	unsigned int frag_offset;
    160	struct page *frag_page;
    161	long frag_users;
    162
    163#ifdef CONFIG_PAGE_POOL_STATS
    164	/* these stats are incremented while in softirq context */
    165	struct page_pool_alloc_stats alloc_stats;
    166#endif
    167	u32 xdp_mem_id;
    168
    169	/*
    170	 * Data structure for allocation side
    171	 *
    172	 * Drivers allocation side usually already perform some kind
    173	 * of resource protection.  Piggyback on this protection, and
    174	 * require driver to protect allocation side.
    175	 *
    176	 * For NIC drivers this means, allocate a page_pool per
    177	 * RX-queue. As the RX-queue is already protected by
    178	 * Softirq/BH scheduling and napi_schedule. NAPI schedule
    179	 * guarantee that a single napi_struct will only be scheduled
    180	 * on a single CPU (see napi_schedule).
    181	 */
    182	struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
    183
    184	/* Data structure for storing recycled pages.
    185	 *
    186	 * Returning/freeing pages is more complicated synchronization
    187	 * wise, because free's can happen on remote CPUs, with no
    188	 * association with allocation resource.
    189	 *
    190	 * Use ptr_ring, as it separates consumer and producer
    191	 * effeciently, it a way that doesn't bounce cache-lines.
    192	 *
    193	 * TODO: Implement bulk return pages into this structure.
    194	 */
    195	struct ptr_ring ring;
    196
    197#ifdef CONFIG_PAGE_POOL_STATS
    198	/* recycle stats are per-cpu to avoid locking */
    199	struct page_pool_recycle_stats __percpu *recycle_stats;
    200#endif
    201	atomic_t pages_state_release_cnt;
    202
    203	/* A page_pool is strictly tied to a single RX-queue being
    204	 * protected by NAPI, due to above pp_alloc_cache. This
    205	 * refcnt serves purpose is to simplify drivers error handling.
    206	 */
    207	refcount_t user_cnt;
    208
    209	u64 destroy_cnt;
    210};
    211
    212struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
    213
    214static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
    215{
    216	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
    217
    218	return page_pool_alloc_pages(pool, gfp);
    219}
    220
    221struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
    222				  unsigned int size, gfp_t gfp);
    223
    224static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
    225						    unsigned int *offset,
    226						    unsigned int size)
    227{
    228	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
    229
    230	return page_pool_alloc_frag(pool, offset, size, gfp);
    231}
    232
    233/* get the stored dma direction. A driver might decide to treat this locally and
    234 * avoid the extra cache line from page_pool to determine the direction
    235 */
    236static
    237inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
    238{
    239	return pool->p.dma_dir;
    240}
    241
    242bool page_pool_return_skb_page(struct page *page);
    243
    244struct page_pool *page_pool_create(const struct page_pool_params *params);
    245
    246struct xdp_mem_info;
    247
    248#ifdef CONFIG_PAGE_POOL
    249void page_pool_destroy(struct page_pool *pool);
    250void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
    251			   struct xdp_mem_info *mem);
    252void page_pool_release_page(struct page_pool *pool, struct page *page);
    253void page_pool_put_page_bulk(struct page_pool *pool, void **data,
    254			     int count);
    255#else
    256static inline void page_pool_destroy(struct page_pool *pool)
    257{
    258}
    259
    260static inline void page_pool_use_xdp_mem(struct page_pool *pool,
    261					 void (*disconnect)(void *),
    262					 struct xdp_mem_info *mem)
    263{
    264}
    265static inline void page_pool_release_page(struct page_pool *pool,
    266					  struct page *page)
    267{
    268}
    269
    270static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
    271					   int count)
    272{
    273}
    274#endif
    275
    276void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
    277				  unsigned int dma_sync_size,
    278				  bool allow_direct);
    279
    280static inline void page_pool_fragment_page(struct page *page, long nr)
    281{
    282	atomic_long_set(&page->pp_frag_count, nr);
    283}
    284
    285static inline long page_pool_defrag_page(struct page *page, long nr)
    286{
    287	long ret;
    288
    289	/* If nr == pp_frag_count then we have cleared all remaining
    290	 * references to the page. No need to actually overwrite it, instead
    291	 * we can leave this to be overwritten by the calling function.
    292	 *
    293	 * The main advantage to doing this is that an atomic_read is
    294	 * generally a much cheaper operation than an atomic update,
    295	 * especially when dealing with a page that may be partitioned
    296	 * into only 2 or 3 pieces.
    297	 */
    298	if (atomic_long_read(&page->pp_frag_count) == nr)
    299		return 0;
    300
    301	ret = atomic_long_sub_return(nr, &page->pp_frag_count);
    302	WARN_ON(ret < 0);
    303	return ret;
    304}
    305
    306static inline bool page_pool_is_last_frag(struct page_pool *pool,
    307					  struct page *page)
    308{
    309	/* If fragments aren't enabled or count is 0 we were the last user */
    310	return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
    311	       (page_pool_defrag_page(page, 1) == 0);
    312}
    313
    314static inline void page_pool_put_page(struct page_pool *pool,
    315				      struct page *page,
    316				      unsigned int dma_sync_size,
    317				      bool allow_direct)
    318{
    319	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
    320	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
    321	 */
    322#ifdef CONFIG_PAGE_POOL
    323	if (!page_pool_is_last_frag(pool, page))
    324		return;
    325
    326	page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
    327#endif
    328}
    329
    330/* Same as above but will try to sync the entire area pool->max_len */
    331static inline void page_pool_put_full_page(struct page_pool *pool,
    332					   struct page *page, bool allow_direct)
    333{
    334	page_pool_put_page(pool, page, -1, allow_direct);
    335}
    336
    337/* Same as above but the caller must guarantee safe context. e.g NAPI */
    338static inline void page_pool_recycle_direct(struct page_pool *pool,
    339					    struct page *page)
    340{
    341	page_pool_put_full_page(pool, page, true);
    342}
    343
    344#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
    345		(sizeof(dma_addr_t) > sizeof(unsigned long))
    346
    347static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
    348{
    349	dma_addr_t ret = page->dma_addr;
    350
    351	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
    352		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
    353
    354	return ret;
    355}
    356
    357static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
    358{
    359	page->dma_addr = addr;
    360	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
    361		page->dma_addr_upper = upper_32_bits(addr);
    362}
    363
    364static inline bool is_page_pool_compiled_in(void)
    365{
    366#ifdef CONFIG_PAGE_POOL
    367	return true;
    368#else
    369	return false;
    370#endif
    371}
    372
    373static inline bool page_pool_put(struct page_pool *pool)
    374{
    375	return refcount_dec_and_test(&pool->user_cnt);
    376}
    377
    378/* Caller must provide appropriate safe context, e.g. NAPI. */
    379void page_pool_update_nid(struct page_pool *pool, int new_nid);
    380static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
    381{
    382	if (unlikely(pool->p.nid != new_nid))
    383		page_pool_update_nid(pool, new_nid);
    384}
    385
    386static inline void page_pool_ring_lock(struct page_pool *pool)
    387	__acquires(&pool->ring.producer_lock)
    388{
    389	if (in_serving_softirq())
    390		spin_lock(&pool->ring.producer_lock);
    391	else
    392		spin_lock_bh(&pool->ring.producer_lock);
    393}
    394
    395static inline void page_pool_ring_unlock(struct page_pool *pool)
    396	__releases(&pool->ring.producer_lock)
    397{
    398	if (in_serving_softirq())
    399		spin_unlock(&pool->ring.producer_lock);
    400	else
    401		spin_unlock_bh(&pool->ring.producer_lock);
    402}
    403
    404#endif /* _NET_PAGE_POOL_H */