cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

gre_offload.c (7326B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>
#include <net/gro.h>

static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool need_csum, offload_csum, gso_partial, need_ipsec;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;

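	/* Segmentation only makes sense for an encapsulated skb whose
	 * GRE header is at least the base header and fully present in
	 * the linear area.
	 */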
	if (!skb->encapsulation)
		goto out;

	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

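	/* SKB_GSO_GRE_CSUM means the tunnel asked for a GRE checksum;
	 * each segment's checksum field is filled in below.
	 */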
	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

	features &= skb->dev->hw_enc_features;
	if (need_csum)
		features &= ~NETIF_F_SCTP_CRC;

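	/* If the packet will pass through an IPsec transform (xfrm
	 * attached to the dst), the device never sees the final bytes,
	 * so the GRE checksum must be computed in software.
	 */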
	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum && !need_ipsec &&
			  (skb->dev->features & NETIF_F_HW_CSUM));

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

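	/* With SKB_GSO_PARTIAL the device performs the final split, so
	 * the checksum written below must be based on gso_size rather
	 * than the actual segment length.
	 */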
	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__sum16 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

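		/* Restore the outer headers in front of this segment. */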
		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__sum16 *)(greh + 1);

		if (gso_partial && skb_is_gso(skb)) {
			unsigned int partial_adj;

			/* Adjust checksum to account for the fact that
			 * the partial checksum is based on actual size
			 * whereas headers should be based on MSS size.
			 */
			partial_adj = skb->len + skb_headroom(skb) -
				      SKB_GSO_CB(skb)->data_offset -
				      skb_shinfo(skb)->gso_size;
			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
		} else {
			*pcsum = 0;
		}

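		/* Zero the 16-bit reserved field that follows the GRE
		 * checksum.
		 */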
		*(pcsum + 1) = 0;
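		/* Fill the checksum in software unless it can be left
		 * to the device via CHECKSUM_PARTIAL.
		 */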
		if (skb->encapsulation || !offload_csum) {
			*pcsum = gso_make_checksum(skb, 0);
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = sizeof(*greh);
		}
	} while ((skb = skb->next));
out:
	return segs;
}

static struct sk_buff *gre_gro_receive(struct list_head *head,
				       struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

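	/* Aggregate at most one level of encapsulation per GRO walk. */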
	if (NAPI_GRO_CB(skb)->encap_mark)
		goto out;

	NAPI_GRO_CB(skb)->encap_mark = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and K (key), C (csum) flags. Note that
	 * although the support for the S (seq#) flag can be added easily
	 * for GRO, this is problematic for GSO hence cannot be enabled
	 * here because a GRO pkt may end up in the forwarding path, thus
	 * requiring GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	/* We can only support GRE_CSUM if we can track the location of
	 * the GRE header.  In the case of FOU/GUE we cannot because the
	 * outer UDP header displaces the GRE header leaving us in a state
	 * of limbo.
	 */
	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
		goto out;

	type = greh->protocol;

	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out;

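	/* The optional key and checksum fields each add one 4-byte
	 * section (GRE_HEADER_SECTION) to the header length.
	 */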
	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
					     null_compute_pseudo);
	}

	list_for_each_entry(p, head, list) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" includes:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

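	/* Hide the GRE header so the inner protocol's gro_receive sees
	 * its own header at the current offset.
	 */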
	skb_gro_pull(skb, grehlen);

	/* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	flush = 0;

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	unsigned int grehlen = sizeof(*greh);
	int err = -ENOENT;
	__be16 type;

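	/* Mark the merged packet as GRE-encapsulated so GSO can break
	 * it up again if it is forwarded.
	 */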
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

	type = greh->protocol;
	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	skb_set_inner_mac_header(skb, nhoff + grehlen);

	return err;
}

static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};

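/* Register the offloads for IPv4; IPPROTO_GRE over IPv6 reuses the
 * same callbacks when IPv6 is enabled.
 */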
static int __init gre_offload_init(void)
{
	int err;

	err = inet_add_offload(&gre_offload, IPPROTO_GRE);
#if IS_ENABLED(CONFIG_IPV6)
	if (err)
		return err;

	err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
	if (err)
		inet_del_offload(&gre_offload, IPPROTO_GRE);
#endif

	return err;
}
device_initcall(gre_offload_init);