cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gro.h (12079B)


/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_IPV6_GRO_H
#define _NET_IPV6_GRO_H

#include <linux/indirect_call_wrapper.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* jiffies when first packet was created/queued */
	unsigned long age;

/* Used in napi_gro_cb::free */
#define NAPI_GRO_FREE             1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
	/* portion of the cb set to zero at every gro iteration */
	struct_group(zeroed,

		/* Start offset for remote checksum offload */
		u16	gro_remcsum_start;

		/* This is non-zero if the packet may be of the same flow. */
		u8	same_flow:1;

		/* Used in tunnel GRO receive */
		u8	encap_mark:1;

		/* GRO checksum is valid */
		u8	csum_valid:1;

		/* Number of checksums via CHECKSUM_UNNECESSARY */
		u8	csum_cnt:3;

		/* Free the skb? */
		u8	free:2;

		/* Used in foo-over-udp, set in udp[46]_gro_receive */
		u8	is_ipv6:1;

		/* Used in GRE, set in fou/gue_gro_receive */
		u8	is_fou:1;

		/* Used to determine if flush_id can be ignored */
		u8	is_atomic:1;

		/* Number of gro_receive callbacks this packet already went through */
		u8 recursion_counter:4;

		/* GRO is done by frag_list pointer chaining. */
		u8	is_flist:1;
	);

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
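
/* A sketch of a compile-time sanity check for the cast above (hypothetical
 * here, not part of this header): the private GRO state must fit inside the
 * skb control buffer, an invariant the networking core relies on.
 *
 *	static_assert(sizeof(struct napi_gro_cb) <=
 *		      sizeof_field(struct sk_buff, cb));
 */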

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}

typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}
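
/* Usage sketch for the wrappers above (hypothetical encapsulation handler;
 * the names mytun_gro_receive and inner_gro_receive are assumptions): a
 * tunnel gro_receive implementation goes through call_gro_receive() instead
 * of invoking the inner handler directly, so that GRO_RECURSION_LIMIT (15)
 * stacked gro_receive callbacks flush the packet rather than recursing
 * further.
 *
 *	static struct sk_buff *mytun_gro_receive(struct list_head *head,
 *						 struct sk_buff *skb)
 *	{
 *		...
 *		pp = call_gro_receive(inner_gro_receive, head, skb);
 *		...
 *	}
 */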

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}
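
/* Canonical header-access pattern built from the three helpers above
 * (a sketch; udp_gro_udphdr() near the end of this file is a concrete
 * instance): try the frag0 fast path first, and only fall back to the
 * slow path, which pulls the header into the linear area and invalidates
 * frag0, when frag0 does not cover the requested length.
 *
 *	off = skb_gro_offset(skb);
 *	hdr = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, off + sizeof(*hdr)))
 *		hdr = skb_gro_header_slow(skb, off + sizeof(*hdr), off);
 *	if (!hdr)
 *		goto flush;	// hypothetical error path
 */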

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
						wsum_negate(NAPI_GRO_CB(skb)->csum)));
}

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fall back to the normal
		 * path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
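
/* Usage sketch (modeled on a UDP receive path; IPPROTO_UDP, uh and the
 * flush label are assumptions about the caller): validate the transport
 * checksum over the GRO region, treating a zero UDP checksum as "no
 * checksum present".
 *
 *	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
 *						 inet_gro_compute_pseudo))
 *		goto flush;
 */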

static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)
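
/* Usage sketch (UDP-style; the protocol choice is an assumption): after a
 * successful validation, a caller may opportunistically convert the skb to
 * a verified CHECKSUM_COMPLETE state so later tunnel layers can reuse the
 * checksum instead of recomputing it.
 *
 *	skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
 *				     inet_gro_compute_pseudo);
 */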

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}
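
/* Remote-checksum-offload lifecycle, sketched for a hypothetical
 * encapsulation gro_receive handler (hdr, off, start, offset and nopartial
 * are placeholders for the caller's header layout):
 *
 *	struct gro_remcsum grc;
 *
 *	skb_gro_remcsum_init(&grc);
 *	hdr = skb_gro_remcsum_process(skb, hdr, off, sizeof(*hdr),
 *				      start, offset, &grc, nopartial);
 *	if (!hdr)
 *		goto flush;
 *	...
 *	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 */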

#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif
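
/* Usage sketch (tail of a hypothetical tunnel gro_receive handler): with
 * XFRM offload enabled, a pp of ERR_PTR(-EINPROGRESS) means the packet has
 * been handed to the crypto path, so the flush bit must be left alone.
 *
 *	skb_gro_flush_final(skb, pp, flush);
 *	return pp;
 */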

INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));

#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_INET(cb, f2, f1, head, skb);	\
})
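
/* Usage sketch (hypothetical caller; the ops structure is an assumption):
 * dispatch to the IPv6 or IPv4 receive handler while avoiding a
 * retpoline-expensive indirect call for the common cases, and still
 * honoring the recursion limit.
 *
 *	pp = indirect_call_gro_receive_inet(ops->callbacks.gro_receive,
 *					    ipv6_gro_receive,
 *					    inet_gro_receive, head, skb);
 */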

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}

static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
					    skb_gro_len(skb), proto, 0));
}

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);

/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}
/* Queue one GRO_NORMAL SKB up for list processing. If the batch size is
 * exceeded, pass the whole batch up to the stack.
 */
static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
	list_add_tail(&skb->list, &napi->rx_list);
	napi->rx_count += segs;
	if (napi->rx_count >= gro_normal_batch)
		gro_normal_list(napi);
}
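
/* Worked example (assuming gro_normal_batch == 8): rx_count accumulates
 * source segments, not skbs, so two 3-segment GRO skbs plus two singles
 * trip the batch limit and flush the list.
 *
 *	gro_normal_one(napi, a, 3);	// rx_count = 3
 *	gro_normal_one(napi, b, 3);	// rx_count = 6
 *	gro_normal_one(napi, c, 1);	// rx_count = 7
 *	gro_normal_one(napi, d, 1);	// rx_count = 8 -> gro_normal_list()
 */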

#endif /* _NET_IPV6_GRO_H */