author    David S. Miller <davem@davemloft.net>  2019-07-08 14:58:04 -0700
committer David S. Miller <davem@davemloft.net>  2019-07-08 14:58:04 -0700
commit    aa6be2b95d4ecfd0e3a433fc3270da98fffab115 (patch)
tree      a472e36e039968b80e3ebc8301bc92cda9568ad6 /include
parent    49db9228b8d83cbdef44b4757a393990f48b6d85 (diff)
parent    9ed4050c0d75768066a07cf66eef4f8dc9d79b52 (diff)
Merge branch 'cpsw-Add-XDP-support'
Ivan Khoronzhuk says:

====================
net: ethernet: ti: cpsw: Add XDP support

This patchset adds XDP support for the TI cpsw driver and bases it on
the page_pool allocator. It was verified with af_xdp socket drop,
af_xdp l2f, and ebpf XDP_DROP, XDP_REDIRECT, XDP_PASS and XDP_TX.

It was verified with the following configs enabled:
CONFIG_JIT=y
CONFIG_BPFILTER=y
CONFIG_BPF_SYSCALL=y
CONFIG_XDP_SOCKETS=y
CONFIG_BPF_EVENTS=y
CONFIG_HAVE_EBPF_JIT=y
CONFIG_BPF_JIT=y
CONFIG_CGROUP_BPF=y

Link to previous v7: https://lkml.org/lkml/2019/7/4/715

Regular tests with iperf2 were also run in order to verify the impact
on regular netstack performance, compared with the base commit:
https://pastebin.com/JSMT0iZ4

v8..v9:
- fix warnings on arm64 caused by typos in type casting

v7..v8:
- corrected dma calculation based on headroom instead of hard start
- minor comment changes

v6..v7:
- rolled back to the v4 solution, but with a small modification
- picked up patch: https://www.spinics.net/lists/netdev/msg583145.html
- added changes related to the netsec fix and cpsw

v5..v6:
- keep rx_dev the same across the redirect/flush cycle
- dropped "net: ethernet: ti: davinci_cpdma: return handler status"
- other changes described in the patches

v4..v5:
- added two preliminary patches:
  net: ethernet: ti: davinci_cpdma: allow desc split while down
  net: ethernet: ti: cpsw_ethtool: allow res split while down
- added xdp allocator refcnt on the xdp level, avoiding a page pool refcnt
- moved flush status to a separate argument for cpdma_chan_process
- reworked cpsw code according to the latest changes to the allocator
- added a missed statistic counter

v3..v4:
- added page pool user counter
- use the same pool for both ndevs in dual mac mode
- restructured page pool create/destroy according to the latest API changes

v2..v3:
- each rxq and ndev has its own page pool

v1..v2:
- combined xdp_xmit functions
- used page allocation without refcnt juggling
- unmapped page for the skb netstack
- moved rxq/page pool allocation to the open/close pair
- added several preliminary patches:
  net: page_pool: add helper function to retrieve dma addresses
  net: page_pool: add helper function to unmap dma addresses
  net: ethernet: ti: cpsw: use cpsw as drv data
  net: ethernet: ti: cpsw_ethtool: simplify slave loops
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
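The core design point of the cover letter, one page_pool per RX queue
registered with the XDP core as that queue's memory model, looks roughly
like the sketch below. This is a hedged illustration, not code from the
cpsw patches: my_rxq, its fields, and my_rxq_pool_init() are hypothetical,
while page_pool_create(), xdp_rxq_info_reg_mem_model() and
MEM_TYPE_PAGE_POOL are in-tree APIs of this kernel era.

    #include <net/page_pool.h>
    #include <net/xdp.h>

    /* Hypothetical per-queue state; not from the cpsw patches. */
    struct my_rxq {
    	struct xdp_rxq_info xdp_rxq;
    	struct page_pool *pool;
    };

    /* Assumes xdp_rxq_info_reg() already succeeded for this queue. */
    static int my_rxq_pool_init(struct my_rxq *rxq, struct device *dev,
    			    unsigned int pool_size)
    {
    	struct page_pool_params pp_params = {
    		.order		= 0,               /* one page per frame */
    		.flags		= PP_FLAG_DMA_MAP, /* pool maps pages for DMA */
    		.pool_size	= pool_size,
    		.nid		= NUMA_NO_NODE,
    		.dev		= dev,
    		.dma_dir	= DMA_BIDIRECTIONAL, /* XDP_TX writes back */
    	};
    	int err;

    	rxq->pool = page_pool_create(&pp_params);
    	if (IS_ERR(rxq->pool)) {
    		err = PTR_ERR(rxq->pool);
    		rxq->pool = NULL;
    		return err;
    	}

    	/* Tell the XDP core that frames on this queue come from the pool */
    	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
    					 rxq->pool);
    	if (err) {
    		page_pool_destroy(rxq->pool); /* NULL-safe, see diff below */
    		rxq->pool = NULL;
    	}

    	return err;
    }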
Diffstat (limited to 'include')
-rw-r--r--  include/net/page_pool.h | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+), 0 deletions(-)
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index ee9c871d2043..2cbcdbdec254 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -101,6 +101,12 @@ struct page_pool {
struct ptr_ring ring;
atomic_t pages_state_release_cnt;
+
+	/* A page_pool is strictly tied to a single RX-queue, being
+	 * protected by NAPI, due to the above pp_alloc_cache. This
+	 * refcnt serves to simplify the drivers' error handling.
+	 */
+ refcount_t user_cnt;
};
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
@@ -134,6 +140,15 @@ static inline void page_pool_free(struct page_pool *pool)
#endif
}
+/* Drivers use this instead of page_pool_free */
+static inline void page_pool_destroy(struct page_pool *pool)
+{
+ if (!pool)
+ return;
+
+ page_pool_free(pool);
+}
+
/* Never call this directly, use helpers below */
void __page_pool_put_page(struct page_pool *pool,
struct page *page, bool allow_direct);
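Because page_pool_destroy() tolerates a NULL pool, a driver's unwind path
can call it unconditionally. A minimal sketch under the same assumptions
as above, pairing with the hypothetical my_rxq_pool_init():

    /* Hypothetical teardown path; xdp_rxq_info_unreg() drops the XDP
     * core's use of the pool, and page_pool_destroy() is safe to call
     * even when rxq->pool is NULL, so no "how far did setup get"
     * bookkeeping is needed in the error path.
     */
    static void my_rxq_pool_fini(struct my_rxq *rxq)
    {
    	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
    		xdp_rxq_info_unreg(&rxq->xdp_rxq);
    	page_pool_destroy(rxq->pool);
    	rxq->pool = NULL;
    }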
@@ -201,4 +216,14 @@ static inline bool is_page_pool_compiled_in(void)
#endif
}
+static inline void page_pool_get(struct page_pool *pool)
+{
+ refcount_inc(&pool->user_cnt);
+}
+
+static inline bool page_pool_put(struct page_pool *pool)
+{
+ return refcount_dec_and_test(&pool->user_cnt);
+}
+
#endif /* _NET_PAGE_POOL_H */
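The get/put pair implements the "page pool user counter" and the dual-mac
sharing mentioned in the cover letter: each additional ndev takes a
reference with page_pool_get(), and page_pool_put() returns true only when
the last user drops out. A hedged sketch, assuming the pool's creator
holds the initial user_cnt reference (the init path is not visible in
this hunk), with function names that are illustrative only:

    /* Second ndev in the dual-mac case starts sharing the pool. */
    static void attach_second_ndev(struct page_pool *pool)
    {
    	page_pool_get(pool);	/* one extra reference per extra user */
    }

    static void detach_ndev(struct page_pool *pool)
    {
    	/* refcount_dec_and_test() semantics: true only for the last
    	 * user, at which point the pool's resources may actually be
    	 * released (drivers normally go through page_pool_destroy()).
    	 */
    	if (page_pool_put(pool))
    		pr_debug("last page_pool user detached\n");
    }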