/* SPDX-License-Identifier: GPL-2.0-only */
/* include/net/xdp.h
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__

#include <linux/skbuff.h> /* skb_shared_info */

/**
 * DOC: XDP RX-queue information
 *
 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 * level RX-ring queues. It is information that is specific to how
 * the driver has configured a given RX-ring queue.
 *
 * Each xdp_buff frame received in the driver carries a (pointer)
 * reference to this xdp_rxq_info structure. This provides the XDP
 * data-path read-access to RX-info for both kernel and bpf-side
 * (limited subset).
 *
 * For now, direct access is only safe while running in NAPI/softirq
 * context. Contents are read-mostly and must not be updated during
 * driver NAPI/softirq poll.
 *
 * The driver usage API is a register and unregister API.
 *
 * The struct is not directly tied to the XDP prog. A new XDP prog
 * can be attached as long as it doesn't change the underlying
 * RX-ring. If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
 * memory. In that process the driver MUST call unregister (which
 * also applies for driver shutdown and unload). The register API is
 * also mandatory during RX-ring setup.
 */
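/* Example: a minimal sketch of the register/unregister pairing described
 * above. The ring container (struct mydrv_rxq) and the helper names are
 * hypothetical stand-ins for a driver's own RX-ring state, and error
 * handling is reduced to the essentials. After registering, a driver
 * also picks a memory model with xdp_rxq_info_reg_mem_model() (declared
 * further below); MEM_TYPE_PAGE_SHARED needs no allocator argument.
 *
 *	struct mydrv_rxq {
 *		struct xdp_rxq_info xdp_rxq;
 *	};
 *
 *	static int mydrv_rxq_setup(struct mydrv_rxq *rq, struct net_device *dev,
 *				   u32 queue_index, unsigned int napi_id)
 *	{
 *		int err;
 *
 *		err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, queue_index, napi_id);
 *		if (err)
 *			return err;
 *
 *		err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
 *						 MEM_TYPE_PAGE_SHARED, NULL);
 *		if (err)
 *			xdp_rxq_info_unreg(&rq->xdp_rxq);
 *		return err;
 *	}
 *
 *	static void mydrv_rxq_teardown(struct mydrv_rxq *rq)
 *	{
 *		xdp_rxq_info_unreg(&rq->xdp_rxq);
 *	}
 */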
enum xdp_mem_type {
	MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
	MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
	MEM_TYPE_PAGE_POOL,
	MEM_TYPE_XSK_BUFF_POOL,
	MEM_TYPE_MAX,
};

/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH		(1U << 0)	/* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK	XDP_XMIT_FLUSH

struct xdp_mem_info {
	u32 type; /* enum xdp_mem_type, but known size type */
	u32 id;
};

struct page_pool;

struct xdp_rxq_info {
	struct net_device *dev;
	u32 queue_index;
	u32 reg_state;
	struct xdp_mem_info mem;
	unsigned int napi_id;
	u32 frag_size;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */

struct xdp_txq_info {
	struct net_device *dev;
};

enum xdp_buff_flags {
	XDP_FLAGS_HAS_FRAGS		= BIT(0), /* non-linear xdp buff */
	XDP_FLAGS_FRAGS_PF_MEMALLOC	= BIT(1), /* xdp paged memory is under
						   * pressure
						   */
};

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	struct xdp_txq_info *txq;
	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom */
	u32 flags; /* supported values defined in xdp_buff_flags */
};

static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
{
	return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
}

static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags |= XDP_FLAGS_HAS_FRAGS;
}

static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
}

static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
{
	return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}

static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
{
	xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
}

static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
	xdp->frame_sz = frame_sz;
	xdp->rxq = rxq;
	xdp->flags = 0;
}

static __always_inline void
xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
		 int headroom, int data_len, const bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	xdp->data_meta = meta_valid ? data : data + 1;
}
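/* Example: how the two helpers above are typically paired on a driver's
 * RX path. A minimal sketch, assuming the full-page buffer model with
 * XDP_PACKET_HEADROOM reserved in front of the packet; "prog", "page"
 * and "len" stand in for the attached bpf_prog, the RX page and the
 * received length. xdp_init_buff() runs once per NAPI poll, since
 * frame_sz and rxq are constant across packets, while xdp_prepare_buff()
 * runs once per received packet.
 *
 *	struct xdp_buff xdp;
 *	u32 act;
 *
 *	xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
 *	...
 *	xdp_prepare_buff(&xdp, page_address(page), XDP_PACKET_HEADROOM,
 *			 len, false);
 *	act = bpf_prog_run_xdp(prog, &xdp);
 */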
/* Reserve memory area at the end of the data area.
 *
 * This macro reserves tailroom in the XDP buffer by limiting the
 * XDP/BPF data access to data_hard_end. Notice the same area (and
 * size) is used for XDP_PASS, when constructing the SKB via
 * build_skb().
 */
#define xdp_data_hard_end(xdp)				\
	((xdp)->data_hard_start + (xdp)->frame_sz -	\
	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

static inline struct skb_shared_info *
xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
{
	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}

static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp)
{
	unsigned int len = xdp->data_end - xdp->data;
	struct skb_shared_info *sinfo;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	len += sinfo->xdp_frags_size;
out:
	return len;
}

struct xdp_frame {
	void *data;
	u16 len;
	u16 headroom;
	u32 metasize:8;
	u32 frame_sz:24;
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem info is valid on remote CPU.
	 */
	struct xdp_mem_info mem;
	struct net_device *dev_rx; /* used by cpumap */
	u32 flags; /* supported values defined in xdp_buff_flags */
};

static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
{
	return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
}

static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame)
{
	return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}

#define XDP_BULK_QUEUE_SIZE	16
struct xdp_frame_bulk {
	int count;
	void *xa;
	void *q[XDP_BULK_QUEUE_SIZE];
};

static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{
	/* bq->count will be zeroed when bq->xa gets updated */
	bq->xa = NULL;
}

static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
{
	void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);

	return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
}

struct xdp_cpumap_stats {
	unsigned int redirect;
	unsigned int pass;
	unsigned int drop;
};

/* Clear kernel pointers in xdp_frame */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
	frame->data = NULL;
	frame->dev_rx = NULL;
}

static inline void
xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
			   unsigned int size, unsigned int truesize,
			   bool pfmemalloc)
{
	skb_shinfo(skb)->nr_frags = nr_frags;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
	skb->pfmemalloc |= pfmemalloc;
}

/* Avoids inlining WARN macro in fast-path */
void xdp_warn(const char *msg, const char *func, const int line);
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev);
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);

static inline
void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
{
	xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
	xdp->data = frame->data;
	xdp->data_end = frame->data + frame->len;
	xdp->data_meta = frame->data - frame->metasize;
	xdp->frame_sz = frame->frame_sz;
	xdp->flags = frame->flags;
}

static inline
int xdp_update_frame_from_buff(struct xdp_buff *xdp,
			       struct xdp_frame *xdp_frame)
{
	int metasize, headroom;

	/* Ensure headroom is available for storing info */
	headroom = xdp->data - xdp->data_hard_start;
	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
		return -ENOSPC;

	/* Catch if driver didn't reserve tailroom for skb_shared_info */
	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		XDP_WARN("Driver BUG: missing reserved tailroom");
		return -ENOSPC;
	}

	xdp_frame->data = xdp->data;
	xdp_frame->len = xdp->data_end - xdp->data;
	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
	xdp_frame->metasize = metasize;
	xdp_frame->frame_sz = xdp->frame_sz;
	xdp_frame->flags = xdp->flags;

	return 0;
}

/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdp_frame;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
		return xdp_convert_zc_to_xdp_frame(xdp);

	/* Store info in top of packet */
	xdp_frame = xdp->data_hard_start;
	if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
		return NULL;

	/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
	xdp_frame->mem = xdp->rxq->mem;

	return xdp_frame;
}
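/* Example: what a driver's XDP_TX/XDP_REDIRECT handling built on the
 * helper above can look like. A minimal sketch; mydrv_xmit_frame() is a
 * hypothetical TX helper, not part of this API. The xdp_frame header is
 * carved out of the packet headroom, so the xdp_buff must not be used
 * once conversion succeeds, and a frame that cannot be sent must be
 * released via xdp_return_frame() (declared below).
 *
 *	struct xdp_frame *xdpf;
 *
 *	xdpf = xdp_convert_buff_to_frame(&xdp);
 *	if (unlikely(!xdpf))
 *		goto drop;
 *
 *	if (mydrv_xmit_frame(txq, xdpf))
 *		xdp_return_frame(xdpf);
 */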
void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp);
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq);

/* When sending an xdp_frame into the network stack, there is no
 * return point callback, which is needed to release e.g. DMA-mapping
 * resources with page_pool. Thus, have an explicit function to release
 * frame resources.
 */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
static inline void xdp_release_frame(struct xdp_frame *xdpf)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct skb_shared_info *sinfo;
	int i;

	/* Currently only page_pool needs this */
	if (mem->type != MEM_TYPE_PAGE_POOL)
		return;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_release_frame(page_address(page), mem);
	}
out:
	__xdp_release_frame(xdpf->data, mem);
}

static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	unsigned int len = xdpf->len;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	len += sinfo->xdp_frags_size;
out:
	return len;
}

int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size);
static inline int
xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		 struct net_device *dev, u32 queue_index,
		 unsigned int napi_id)
{
	return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0);
}

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);

/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

static inline bool xdp_metalen_invalid(unsigned long metalen)
{
	return (metalen & (sizeof(__u32) - 1)) || (metalen > 32);
}

struct xdp_attachment_info {
	struct bpf_prog *prog;
	u32 flags;
};

struct netdev_bpf;
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf);

#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE

#endif /* __LINUX_NET_XDP_H__ */