esp6.c (29436B)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
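
/*
 * For orientation: the helpers above carve the single esp_alloc_tmp()
 * buffer up roughly as follows (alignment padding not shown):
 *
 *	[ ESN extra (optional) ][ IV ][ aead_request + req ctx ][ SG entries ]
 *
 * Each esp_tmp_*() accessor re-derives its slice from the base pointer,
 * so only that one allocation needs to be tracked; it is stashed in
 * ESP_SKB_CB(skb)->tmp and freed once the crypto request completes.
 */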

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = __inet6_lookup_established(xs_net(x), &tcp_hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}

static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}
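
/*
 * Note: esp_output_tail_tcp() below does not call into TCP directly;
 * it queues the skb via xfrm_trans_queue_net() and the send then runs
 * from esp_output_tcp_encap_cb() above, outside the current xfrm
 * output call chain.
 */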

static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
#endif

static void esp_output_encap_csum(struct sk_buff *skb)
{
	/* UDP encap with IPv6 requires a valid checksum */
	if (*skb_mac_header(skb) == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skb);
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int len = ntohs(uh->len);
		unsigned int offset = skb_transport_offset(skb);
		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					    len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}
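
/*
 * Sketch of the ESN header trick used below (4-byte fields):
 *
 *	on the wire:          [ SPI ][ seq-lo ][ IV ]...
 *	during crypto: [ SPI ][ seq-hi ][ seq-lo ][ IV ]...
 *
 * esp_output_set_esn() shifts the header back by 4 bytes so that the
 * SPI and both sequence number halves are contiguous for the AAD; the
 * 4 bytes it overwrites are saved in esp_output_extra::seqhi and put
 * back by esp_output_restore_header() once encryption has finished.
 */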

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		return (struct ip_esp_hdr *)(udpdata32 + 2);
	}

	return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET6_ESPINTCP
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}
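
/*
 * For reference, the two UDP encapsulations assembled above differ
 * only in the 8-byte non-ESP marker of the older drafts:
 *
 *	UDP_ENCAP_ESPINUDP:         [ udphdr ][ ESP... ]
 *	UDP_ENCAP_ESPINUDP_NON_IKE: [ udphdr ][ 8 zero bytes ][ ESP... ]
 *
 * uh->check is left at zero here; esp_output_encap_csum() fills it in
 * after encryption, since IPv6 requires a valid UDP checksum.
 */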

int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
	    ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
		goto cow;

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);
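
/*
 * The tail bytes reserved by esp6_output_head() follow the RFC 4303
 * trailer layout; esp_output_fill_trailer() writes everything except
 * the ICV, which the AEAD transform produces during encryption:
 *
 *	[ TFC padding ][ 1, 2, ..., padlen ][ pad length ][ next header ][ ICV ]
 */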

int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}
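
/*
 * Inbound counterpart of the trailer above: after decryption, the two
 * bytes in front of the ICV are [ pad length ][ next header ], so
 * esp_remove_trailer() reads both, sanity-checks the pad length
 * against the payload size and trims padding plus ICV off the skb.
 * Its return value is the next-header byte (or a negative errno).
 */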

static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}

int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	if (x->encap) {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int offset = skb_network_offset(skb) + sizeof(*ip6h);
		struct xfrm_encap_tmpl *encap = x->encap;
		u8 nexthdr = ip6h->nexthdr;
		__be16 frag_off, source;
		struct udphdr *uh;
		struct tcphdr *th;

		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
		if (offset == -1) {
			err = -EINVAL;
			goto out;
		}

		uh = (void *)(skb->data + offset);
		th = (void *)(skb->data + offset);
		hdr_len += offset;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}
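
/*
 * Inbound mirror of esp_output_set_esn(): esp_input_set_header() below
 * grows the header by 4 bytes so that SPI, high and low sequence bits
 * are contiguous while the ICV is verified; esp_input_restore_header()
 * undoes the shuffle once decryption is done.
 */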

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}
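
/*
 * Only ICMPV6_PKT_TOOBIG and NDISC_REDIRECT are acted on below, and
 * only after the reported daddr/SPI pair has been matched to a known
 * state; the lookup keeps unsolicited ICMP from updating the path MTU
 * or the route cache.
 */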

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
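
/*
 * crypto_aead_setkey() on an "authenc(...)" tfm expects a single key
 * blob in netlink attribute form; esp_init_authenc() below assembles
 * it as:
 *
 *	[ rtattr CRYPTO_AUTHENC_KEYA_PARAM { enckeylen } ][ auth key ][ enc key ]
 *
 * The repeated (alg_key_len + 7) / 8 converts the bit lengths carried
 * by the xfrm algorithm descriptions into bytes.
 */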

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %u\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
	.priority	= 0,
};
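
/*
 * Registration order below: the xfrm type is added before the IPv6
 * protocol handler and removed after it, so the protocol hook is never
 * live without the type callbacks behind it.
 */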

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);