cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

esp4_offload.c (8973B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

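/*
 * GRO receive handler for ESP: parse the SPI and sequence number,
 * attach a secpath with the matching xfrm state when the NIC has not
 * already done the crypto, and hand the packet to xfrm_input().
 * Returning ERR_PTR(-EINPROGRESS) tells the GRO layer that the skb
 * was consumed here.
 */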
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input; it does all
	 * the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

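/*
 * GSO encap callback: push the outer ESP header in front of the
 * payload, fill in SPI and output sequence number, and stash the
 * inner protocol in the offload context for segmentation.
 */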
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

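/* Tunnel mode: the inner payload is a complete IP packet, so the
 * generic Ethernet GSO helper can segment it directly.
 */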
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
}

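/* Transport mode: skip the ESP header and delegate segmentation to
 * the inner transport protocol's GSO callback.
 */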
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

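/* BEET mode: account for the optional pseudo header (IPPROTO_BEETPH)
 * or IPv6 extension headers before delegating to the inner
 * protocol's GSO callback.
 */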
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

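/* Dispatch GSO segmentation according to the state's encapsulation mode. */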
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

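/*
 * GSO segmentation entry point: validate the offload state, strip
 * the ESP header and IV, and mask out the features the underlying
 * device cannot provide before segmenting by outer mode.
 */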
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

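/* Finish ESP input after GRO; without CRYPTO_DONE the checksum has
 * not been verified by hardware, so clear ip_summed.
 */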
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

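/*
 * ESP transmit path for offloaded states: fall back to software
 * crypto when the device cannot handle this packet, compute the
 * ESP trailer geometry (padding and ICV), fill in SPI and sequence
 * numbers, then either leave the packet to the device (hw_offload)
 * or run the software encryption tail.
 */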
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

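/*
 * Registration glue: esp4_offload hooks ESP into the inet GRO/GSO
 * machinery, esp_type_offload wires the input/output helpers above
 * into the xfrm layer.
 */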
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

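/* Register both offload hooks at module load; tear them down on exit. */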
static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");