cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

skb_array.h (5305B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct skb_array' data structure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	Limited-size FIFO of skbs. Can be used more or less whenever
 *	sk_buff_head can be used, except you need to know the queue size in
 *	advance.
 *	Implemented as a type-safe wrapper around ptr_ring.
 */

#ifndef _LINUX_SKB_ARRAY_H
#define _LINUX_SKB_ARRAY_H 1

#ifdef __KERNEL__
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#endif

struct skb_array {
	struct ptr_ring ring;
};
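
/*
 * Usage sketch (illustrative, not part of the header): the typical
 * lifecycle is init -> produce/consume -> cleanup.
 *
 *	struct skb_array q;
 *
 *	if (skb_array_init(&q, 16, GFP_KERNEL))
 *		return -ENOMEM;
 *	if (skb_array_produce(&q, skb))		(returns -ENOSPC when full)
 *		kfree_skb(skb);
 *	skb = skb_array_consume(&q);		(NULL when empty)
 *	skb_array_cleanup(&q);			(kfree_skb()s any queued entries)
 */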

/* Might be slightly faster than skb_array_full below, but callers invoking
 * this in a loop must use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_full(struct skb_array *a)
{
	return __ptr_ring_full(&a->ring);
}
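
/*
 * Producer-side polling sketch for the note above, assuming another
 * context is draining the ring:
 *
 *	while (__skb_array_full(a))
 *		cpu_relax();
 *	skb_array_produce(a, skb);
 */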

static inline bool skb_array_full(struct skb_array *a)
{
	return ptr_ring_full(&a->ring);
}

static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce(&a->ring, skb);
}

static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_irq(&a->ring, skb);
}

static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(&a->ring, skb);
}

static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_any(&a->ring, skb);
}

/* Might be slightly faster than skb_array_empty below, but only safe if the
 * array is never resized. Also, callers invoking this in a loop must take care
 * to use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_empty(struct skb_array *a)
{
	return __ptr_ring_empty(&a->ring);
}
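
/*
 * Consumer-side polling sketch, mirroring the note above; the __
 * variants leave consumer serialization to the caller:
 *
 *	while (__skb_array_empty(a))
 *		cpu_relax();
 *	skb = __skb_array_consume(a);
 */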

static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
{
	return __ptr_ring_peek(&a->ring);
}

static inline bool skb_array_empty(struct skb_array *a)
{
	return ptr_ring_empty(&a->ring);
}

static inline bool skb_array_empty_bh(struct skb_array *a)
{
	return ptr_ring_empty_bh(&a->ring);
}

static inline bool skb_array_empty_irq(struct skb_array *a)
{
	return ptr_ring_empty_irq(&a->ring);
}

static inline bool skb_array_empty_any(struct skb_array *a)
{
	return ptr_ring_empty_any(&a->ring);
}

static inline struct sk_buff *__skb_array_consume(struct skb_array *a)
{
	return __ptr_ring_consume(&a->ring);
}

static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
	return ptr_ring_consume(&a->ring);
}

static inline int skb_array_consume_batched(struct skb_array *a,
					    struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched(&a->ring, (void **)array, n);
}
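
/*
 * Batched-consume sketch (illustrative): the return value is the number
 * of skbs actually dequeued, which may be less than the requested n.
 *
 *	struct sk_buff *batch[8];
 *	int i, n = skb_array_consume_batched(a, batch, 8);
 *
 *	for (i = 0; i < n; i++)
 *		netif_receive_skb(batch[i]);
 */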

static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
	return ptr_ring_consume_irq(&a->ring);
}

static inline int skb_array_consume_batched_irq(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
	return ptr_ring_consume_any(&a->ring);
}

static inline int skb_array_consume_batched_any(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
	return ptr_ring_consume_bh(&a->ring);
}

static inline int skb_array_consume_batched_bh(struct skb_array *a,
					       struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
}

static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
	if (likely(skb)) {
		int len = skb->len;

		if (skb_vlan_tag_present(skb))
			len += VLAN_HLEN;

		return len;
	} else {
		return 0;
	}
}

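/*
 * The peek_len helpers below run __skb_array_len_with_tag() on the head
 * entry via the PTR_RING_PEEK_CALL* macros, which hold the consumer lock
 * around the callback; an empty ring yields 0.
 */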
static inline int skb_array_peek_len(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_irq(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_bh(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_any(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_init(&a->ring, size, gfp);
}

static void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}

static inline void skb_array_unconsume(struct skb_array *a,
				       struct sk_buff **skbs, int n)
{
	ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
}

static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}

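/*
 * The cast below reinterprets an array of skb_array pointers as an array
 * of ptr_ring pointers; the BUILD_BUG_ON() makes this safe by failing the
 * build unless 'ring' is the first member of struct skb_array (offset 0).
 */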
static inline int skb_array_resize_multiple(struct skb_array **rings,
					    int nrings, unsigned int size,
					    gfp_t gfp)
{
	BUILD_BUG_ON(offsetof(struct skb_array, ring));
	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
					nrings, size, gfp,
					__skb_array_destroy_skb);
}

static inline void skb_array_cleanup(struct skb_array *a)
{
	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
}

#endif /* _LINUX_SKB_ARRAY_H */