flow_dissector.c (51324B)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/filter.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/gre.h>
#include <net/pptp.h>
#include <net/tipc.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/if_hsr.h>
#include <linux/mpls.h>
#include <linux/tcp.h>
#include <linux/ptp_classify.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
#include <uapi/linux/batadv_packet.h>
#include <linux/bpf.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_labels.h>
#endif
#include <linux/bpf-netns.h>

static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
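
/* Usage sketch (illustrative, not part of the original file): a caller builds
 * a key table and initializes its dissector once, mirroring
 * init_default_flow_dissectors() at the bottom of this file. The names
 * my_keys and my_dissector are hypothetical.
 *
 *	static const struct flow_dissector_key my_keys[] = {
 *		{
 *			.key_id = FLOW_DISSECTOR_KEY_CONTROL,
 *			.offset = offsetof(struct flow_keys, control),
 *		},
 *		{
 *			.key_id = FLOW_DISSECTOR_KEY_BASIC,
 *			.offset = offsetof(struct flow_keys, basic),
 *		},
 *	};
 *	static struct flow_dissector my_dissector;
 *
 *	skb_flow_dissector_init(&my_dissector, my_keys, ARRAY_SIZE(my_keys));
 */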

#ifdef CONFIG_BPF_SYSCALL
int flow_dissector_bpf_prog_attach_check(struct net *net,
					 struct bpf_prog *prog)
{
	enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;

	if (net == &init_net) {
		/* BPF flow dissector in the root namespace overrides
		 * any per-net-namespace one. When attaching to root,
		 * make sure we don't have any BPF program attached
		 * to the non-root namespaces.
		 */
		struct net *ns;

		for_each_net(ns) {
			if (ns == &init_net)
				continue;
			if (rcu_access_pointer(ns->bpf.run_array[type]))
				return -EEXIST;
		}
	} else {
		/* Make sure root flow dissector is not attached
		 * when attaching to the non-root namespace.
		 */
		if (rcu_access_pointer(init_net.bpf.run_array[type]))
			return -EEXIST;
	}

	return 0;
}
#endif /* CONFIG_BPF_SYSCALL */

/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    const void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);

static bool icmp_has_id(u8 type)
{
	switch (type) {
	case ICMP_ECHO:
	case ICMP_ECHOREPLY:
	case ICMP_TIMESTAMP:
	case ICMP_TIMESTAMPREPLY:
	case ICMPV6_ECHO_REQUEST:
	case ICMPV6_ECHO_REPLY:
		return true;
	}

	return false;
}

/**
 * skb_flow_get_icmp_tci - extract ICMP(6) Type, Code and Identifier fields
 * @skb: sk_buff to extract from
 * @key_icmp: struct flow_dissector_key_icmp to fill
 * @data: raw buffer pointer to the packet
 * @thoff: offset to extract at
 * @hlen: packet header length
 */
void skb_flow_get_icmp_tci(const struct sk_buff *skb,
			   struct flow_dissector_key_icmp *key_icmp,
			   const void *data, int thoff, int hlen)
{
	struct icmphdr *ih, _ih;

	ih = __skb_header_pointer(skb, thoff, sizeof(_ih), data, hlen, &_ih);
	if (!ih)
		return;

	key_icmp->type = ih->type;
	key_icmp->code = ih->code;

	/* As we use 0 to signal that the Id field is not present,
	 * avoid confusion with packets without such field
	 */
	if (icmp_has_id(ih->type))
		key_icmp->id = ih->un.echo.id ? ntohs(ih->un.echo.id) : 1;
	else
		key_icmp->id = 0;
}
EXPORT_SYMBOL(skb_flow_get_icmp_tci);
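
/* Usage sketch (illustrative, not part of the original file): the returned
 * __be32 packs both ports in network order, which is how
 * __skb_flow_dissect_ports() below stores it into
 * struct flow_dissector_key_ports; reading the src/dst halves through the
 * key's union is an assumption based on that structure.
 *
 *	struct flow_dissector_key_ports ports;
 *
 *	ports.ports = __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
 *	// ports.src and ports.dst now hold the __be16 source/destination ports
 */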

/* If FLOW_DISSECTOR_KEY_ICMP is set, dissect an ICMP packet
 * using skb_flow_get_icmp_tci().
 */
static void __skb_flow_dissect_icmp(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, const void *data,
				    int thoff, int hlen)
{
	struct flow_dissector_key_icmp *key_icmp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ICMP))
		return;

	key_icmp = skb_flow_dissector_target(flow_dissector,
					     FLOW_DISSECTOR_KEY_ICMP,
					     target_container);

	skb_flow_get_icmp_tci(skb, key_icmp, data, thoff, hlen);
}

void skb_flow_dissect_meta(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container)
{
	struct flow_dissector_key_meta *meta;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_META))
		return;

	meta = skb_flow_dissector_target(flow_dissector,
					 FLOW_DISSECTOR_KEY_META,
					 target_container);
	meta->ingress_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(skb_flow_dissect_meta);

static void
skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
				   struct flow_dissector *flow_dissector,
				   void *target_container)
{
	struct flow_dissector_key_control *ctrl;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
		return;

	ctrl = skb_flow_dissector_target(flow_dissector,
					 FLOW_DISSECTOR_KEY_ENC_CONTROL,
					 target_container);
	ctrl->addr_type = type;
}

void
skb_flow_dissect_ct(const struct sk_buff *skb,
		    struct flow_dissector *flow_dissector,
		    void *target_container, u16 *ctinfo_map,
		    size_t mapsize, bool post_ct, u16 zone)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	struct flow_dissector_key_ct *key;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_labels *cl;
	struct nf_conn *ct;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_CT))
		return;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct && !post_ct)
		return;

	key = skb_flow_dissector_target(flow_dissector,
					FLOW_DISSECTOR_KEY_CT,
					target_container);

	if (!ct) {
		key->ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
				TCA_FLOWER_KEY_CT_FLAGS_INVALID;
		key->ct_zone = zone;
		return;
	}

	if (ctinfo < mapsize)
		key->ct_state = ctinfo_map[ctinfo];
#if IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)
	key->ct_zone = ct->zone.id;
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	key->ct_mark = ct->mark;
#endif

	cl = nf_ct_labels_find(ct);
	if (cl)
		memcpy(key->ct_labels, cl->bits, sizeof(key->ct_labels));
#endif /* CONFIG_NF_CONNTRACK */
}
EXPORT_SYMBOL(skb_flow_dissect_ct);
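
/* Usage sketch (illustrative, not part of the original file): @ctinfo_map is
 * indexed by enum ip_conntrack_info and translates it to TCA_FLOWER ct_state
 * flags, in the style of the map a classifier such as tc flower would pass
 * in. The partial table below is hypothetical.
 *
 *	static u16 ct_state_map[] = {
 *		[IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 *				      TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
 *		[IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 *			      TCA_FLOWER_KEY_CT_FLAGS_NEW,
 *	};
 *
 *	skb_flow_dissect_ct(skb, flow_dissector, target_container,
 *			    ct_state_map, ARRAY_SIZE(ct_state_map),
 *			    post_ct, zone);
 */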
*/ 294 if (!dissector_uses_key(flow_dissector, 295 FLOW_DISSECTOR_KEY_ENC_KEYID) && 296 !dissector_uses_key(flow_dissector, 297 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) && 298 !dissector_uses_key(flow_dissector, 299 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) && 300 !dissector_uses_key(flow_dissector, 301 FLOW_DISSECTOR_KEY_ENC_CONTROL) && 302 !dissector_uses_key(flow_dissector, 303 FLOW_DISSECTOR_KEY_ENC_PORTS) && 304 !dissector_uses_key(flow_dissector, 305 FLOW_DISSECTOR_KEY_ENC_IP) && 306 !dissector_uses_key(flow_dissector, 307 FLOW_DISSECTOR_KEY_ENC_OPTS)) 308 return; 309 310 info = skb_tunnel_info(skb); 311 if (!info) 312 return; 313 314 key = &info->key; 315 316 switch (ip_tunnel_info_af(info)) { 317 case AF_INET: 318 skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS, 319 flow_dissector, 320 target_container); 321 if (dissector_uses_key(flow_dissector, 322 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { 323 struct flow_dissector_key_ipv4_addrs *ipv4; 324 325 ipv4 = skb_flow_dissector_target(flow_dissector, 326 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, 327 target_container); 328 ipv4->src = key->u.ipv4.src; 329 ipv4->dst = key->u.ipv4.dst; 330 } 331 break; 332 case AF_INET6: 333 skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS, 334 flow_dissector, 335 target_container); 336 if (dissector_uses_key(flow_dissector, 337 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { 338 struct flow_dissector_key_ipv6_addrs *ipv6; 339 340 ipv6 = skb_flow_dissector_target(flow_dissector, 341 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, 342 target_container); 343 ipv6->src = key->u.ipv6.src; 344 ipv6->dst = key->u.ipv6.dst; 345 } 346 break; 347 } 348 349 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { 350 struct flow_dissector_key_keyid *keyid; 351 352 keyid = skb_flow_dissector_target(flow_dissector, 353 FLOW_DISSECTOR_KEY_ENC_KEYID, 354 target_container); 355 keyid->keyid = tunnel_id_to_key32(key->tun_id); 356 } 357 358 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { 359 struct flow_dissector_key_ports *tp; 360 361 tp = skb_flow_dissector_target(flow_dissector, 362 FLOW_DISSECTOR_KEY_ENC_PORTS, 363 target_container); 364 tp->src = key->tp_src; 365 tp->dst = key->tp_dst; 366 } 367 368 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) { 369 struct flow_dissector_key_ip *ip; 370 371 ip = skb_flow_dissector_target(flow_dissector, 372 FLOW_DISSECTOR_KEY_ENC_IP, 373 target_container); 374 ip->tos = key->tos; 375 ip->ttl = key->ttl; 376 } 377 378 if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) { 379 struct flow_dissector_key_enc_opts *enc_opt; 380 381 enc_opt = skb_flow_dissector_target(flow_dissector, 382 FLOW_DISSECTOR_KEY_ENC_OPTS, 383 target_container); 384 385 if (info->options_len) { 386 enc_opt->len = info->options_len; 387 ip_tunnel_info_opts_get(enc_opt->data, info); 388 enc_opt->dst_opt_type = info->key.tun_flags & 389 TUNNEL_OPTIONS_PRESENT; 390 } 391 } 392} 393EXPORT_SYMBOL(skb_flow_dissect_tunnel_info); 394 395void skb_flow_dissect_hash(const struct sk_buff *skb, 396 struct flow_dissector *flow_dissector, 397 void *target_container) 398{ 399 struct flow_dissector_key_hash *key; 400 401 if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_HASH)) 402 return; 403 404 key = skb_flow_dissector_target(flow_dissector, 405 FLOW_DISSECTOR_KEY_HASH, 406 target_container); 407 408 key->hash = skb_get_hash_raw(skb); 409} 410EXPORT_SYMBOL(skb_flow_dissect_hash); 411 412static enum flow_dissect_ret 413__skb_flow_dissect_mpls(const struct 
__skb_flow_dissect_mpls(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data, int nhoff,
			int hlen, int lse_index, bool *entropy_label)
{
	struct mpls_label *hdr, _hdr;
	u32 entry, label, bos;

	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
		return FLOW_DISSECT_RET_OUT_GOOD;

	if (lse_index >= FLOW_DIS_MPLS_MAX)
		return FLOW_DISSECT_RET_OUT_GOOD;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
				   hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	entry = ntohl(hdr->entry);
	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
	bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *key_mpls;
		struct flow_dissector_mpls_lse *lse;

		key_mpls = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_MPLS,
						     target_container);
		lse = &key_mpls->ls[lse_index];

		lse->mpls_ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
		lse->mpls_bos = bos;
		lse->mpls_tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
		lse->mpls_label = label;
		dissector_set_mpls_lse(key_mpls, lse_index);
	}

	if (*entropy_label &&
	    dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
		struct flow_dissector_key_keyid *key_keyid;

		key_keyid = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
						      target_container);
		key_keyid->keyid = cpu_to_be32(label);
	}

	*entropy_label = label == MPLS_LABEL_ENTROPY;

	return bos ? FLOW_DISSECT_RET_OUT_GOOD : FLOW_DISSECT_RET_PROTO_AGAIN;
}

static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}

static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for version 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version 1 must be PPTP; also check the flags */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	offset += sizeof(struct gre_base_hdr);

	if (hdr->flags & GRE_CSUM)
		offset += sizeof_field(struct gre_full_hdr, csum) +
			  sizeof_field(struct gre_full_hdr, reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += sizeof_field(struct gre_full_hdr, key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += sizeof_field(struct pptp_gre_header, seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += sizeof_field(struct pptp_gre_header, ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}

/**
 * __skb_flow_dissect_batadv() - dissect batman-adv header
 * @skb: sk_buff with the batman-adv header
 * @key_control: flow dissectors control key
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @p_proto: pointer used to update the protocol to process next
 * @p_nhoff: pointer used to update inner network header offset
 * @hlen: packet header length
 * @flags: any combination of FLOW_DISSECTOR_F_*
 *
 * An attempt is made to dissect ETH_P_BATMAN packets. Only
 * &struct batadv_unicast packets are actually processed, because they contain an
 * inner Ethernet header and are usually followed by the actual network header.
 * This allows the flow dissector to continue processing the packet.
 *
 * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
 * FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
 * otherwise FLOW_DISSECT_RET_OUT_BAD
 */
static enum flow_dissect_ret
__skb_flow_dissect_batadv(const struct sk_buff *skb,
			  struct flow_dissector_key_control *key_control,
			  const void *data, __be16 *p_proto, int *p_nhoff,
			  int hlen, unsigned int flags)
{
	struct {
		struct batadv_unicast_packet batadv_unicast;
		struct ethhdr eth;
	} *hdr, _hdr;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
				   &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
		return FLOW_DISSECT_RET_OUT_BAD;

	*p_proto = hdr->eth.h_proto;
	*p_nhoff += sizeof(*hdr);

	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}

static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}

static void
__skb_flow_dissect_ports(const struct sk_buff *skb,
			 struct flow_dissector *flow_dissector,
			 void *target_container, const void *data,
			 int nhoff, u8 ip_proto, int hlen)
{
	enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX;
	struct flow_dissector_key_ports *key_ports;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
		dissector_ports = FLOW_DISSECTOR_KEY_PORTS;
	else if (dissector_uses_key(flow_dissector,
				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
		dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE;

	if (dissector_ports == FLOW_DISSECTOR_KEY_MAX)
		return;

	key_ports = skb_flow_dissector_target(flow_dissector,
					      dissector_ports,
					      target_container);
	key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
						data, hlen);
}

static void
__skb_flow_dissect_ipv4(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			const struct iphdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = iph->tos;
	key_ip->ttl = iph->ttl;
}

static void
__skb_flow_dissect_ipv6(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			const struct ipv6hdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = ipv6_get_dsfield(iph);
	key_ip->ttl = iph->hop_limit;
}

/* Maximum number of protocol headers that can be parsed in
 * __skb_flow_dissect
 */
#define MAX_FLOW_DISSECT_HDRS	15

static bool skb_flow_dissect_allowed(int *num_hdrs)
{
	++*num_hdrs;

	return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
}

static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
				     struct flow_dissector *flow_dissector,
				     void *target_container)
{
	struct flow_dissector_key_ports *key_ports = NULL;
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_tags *key_tags;

	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);
	key_control->thoff = flow_keys->thoff;
	if (flow_keys->is_frag)
		key_control->flags |= FLOW_DIS_IS_FRAGMENT;
	if (flow_keys->is_first_frag)
		key_control->flags |= FLOW_DIS_FIRST_FRAG;
	if (flow_keys->is_encap)
		key_control->flags |= FLOW_DIS_ENCAPSULATION;

	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);
	key_basic->n_proto = flow_keys->n_proto;
	key_basic->ip_proto = flow_keys->ip_proto;

	if (flow_keys->addr_proto == ETH_P_IP &&
	    dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						      target_container);
		key_addrs->v4addrs.src = flow_keys->ipv4_src;
		key_addrs->v4addrs.dst = flow_keys->ipv4_dst;
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	} else if (flow_keys->addr_proto == ETH_P_IPV6 &&
		   dissector_uses_key(flow_dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						      target_container);
		memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
		       sizeof(key_addrs->v6addrs.src));
		memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
		       sizeof(key_addrs->v6addrs.dst));
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
	else if (dissector_uses_key(flow_dissector,
				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS_RANGE,
						      target_container);

	if (key_ports) {
		key_ports->src = flow_keys->sport;
		key_ports->dst = flow_keys->dport;
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
		key_tags = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_FLOW_LABEL,
						     target_container);
		key_tags->flow_label = ntohl(flow_keys->flow_label);
	}
}

bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
		      __be16 proto, int nhoff, int hlen, unsigned int flags)
{
	struct bpf_flow_keys *flow_keys = ctx->flow_keys;
	u32 result;

	/* Pass parameters to the BPF program */
	memset(flow_keys, 0, sizeof(*flow_keys));
	flow_keys->n_proto = proto;
	flow_keys->nhoff = nhoff;
	flow_keys->thoff = flow_keys->nhoff;

	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG !=
		     (int)FLOW_DISSECTOR_F_PARSE_1ST_FRAG);
	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL !=
		     (int)FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP !=
		     (int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
	flow_keys->flags = flags;

	result = bpf_prog_run_pin_on_cpu(prog, ctx);

	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
				   flow_keys->nhoff, hlen);

	return result == BPF_OK;
}

/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @net: associated network namespace, derived from @skb if NULL
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 * @flags: flags that control the dissection process, e.g.
 *         FLOW_DISSECTOR_F_STOP_AT_ENCAP.
 *
 * The function will try to retrieve individual keys into target specified
 * by flow_dissector from either the skbuff or a raw buffer specified by the
 * remaining parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			__be16 proto, int nhoff, int hlen, unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	enum flow_dissect_ret fdret;
	enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
	bool mpls_el = false;
	int mpls_lse = 0;
	int num_hdrs = 0;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			 skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
#if IS_ENABLED(CONFIG_NET_DSA)
		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
			     proto == htons(ETH_P_XDSA))) {
			const struct dsa_device_ops *ops;
			int offset = 0;

			ops = skb->dev->dsa_ptr->tag_ops;
			/* Only DSA header taggers break flow dissection */
			if (ops->needed_headroom) {
				if (ops->flow_dissect)
					ops->flow_dissect(skb, &proto, &offset);
				else
					dsa_tag_generic_flow_dissect(skb,
								     &proto,
								     &offset);
				hlen -= offset;
				nhoff += offset;
			}
		}
#endif
	}

	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (skb) {
		if (!net) {
			if (skb->dev)
				net = dev_net(skb->dev);
			else if (skb->sk)
				net = sock_net(skb->sk);
		}
	}

	WARN_ON_ONCE(!net);
	if (net) {
		enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
		struct bpf_prog_array *run_array;

		rcu_read_lock();
		run_array = rcu_dereference(init_net.bpf.run_array[type]);
		if (!run_array)
			run_array = rcu_dereference(net->bpf.run_array[type]);

		if (run_array) {
			struct bpf_flow_keys flow_keys;
			struct bpf_flow_dissector ctx = {
				.flow_keys = &flow_keys,
				.data = data,
				.data_end = data + hlen,
			};
			__be16 n_proto = proto;
			struct bpf_prog *prog;

			if (skb) {
				ctx.skb = skb;
				/* we can't use 'proto' in the skb case
				 * because it might be set to skb->vlan_proto
				 * which has been pulled from the data
				 */
				n_proto = skb->protocol;
			}

			prog = READ_ONCE(run_array->items[0].prog);
			ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
					       hlen, flags);
			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
						 target_container);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, eth, sizeof(*key_eth_addrs));
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_NUM_OF_VLANS)) {
		struct flow_dissector_key_num_of_vlans *key_num_of_vlans;

		key_num_of_vlans = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_NUM_OF_VLANS,
							     target_container);
		key_num_of_vlans->num_of_vlans = 0;
	}

proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			memcpy(&key_addrs->v4addrs.src, &iph->saddr,
			       sizeof(key_addrs->v4addrs.src));
			memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
			       sizeof(key_addrs->v4addrs.dst));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		__skb_flow_dissect_ipv4(skb, flow_dissector,
					target_container, data, iph);

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags &
				      FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
					fdret = FLOW_DISSECT_RET_OUT_GOOD;
					break;
				}
			}
		}

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs.src, &iph->saddr,
			       sizeof(key_addrs->v6addrs.src));
			memcpy(&key_addrs->v6addrs.dst, &iph->daddr,
			       sizeof(key_addrs->v6addrs.dst));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			}
		}

		__skb_flow_dissect_ipv6(skb, flow_dissector,
					target_container, data, iph);

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan = NULL;
		struct vlan_hdr _vlan;
		__be16 saved_vlan_tpid = proto;

		if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
		    skb && skb_vlan_tag_present(skb)) {
			proto = skb->protocol;
		} else {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan) {
				fdret = FLOW_DISSECT_RET_OUT_BAD;
				break;
			}

			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
		}

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_NUM_OF_VLANS)) {
			struct flow_dissector_key_num_of_vlans *key_nvs;

			key_nvs = skb_flow_dissector_target(flow_dissector,
							    FLOW_DISSECTOR_KEY_NUM_OF_VLANS,
							    target_container);
			key_nvs->num_of_vlans++;
		}

		if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
			dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
		} else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
			dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
		} else {
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		}

		if (dissector_uses_key(flow_dissector, dissector_vlan)) {
			key_vlan = skb_flow_dissector_target(flow_dissector,
							     dissector_vlan,
							     target_container);

			if (!vlan) {
				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
				key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
			} else {
				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
					VLAN_VID_MASK;
				key_vlan->vlan_priority =
					(ntohs(vlan->h_vlan_TCI) &
					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
			}
			key_vlan->vlan_tpid = saved_vlan_tpid;
			key_vlan->vlan_eth_type = proto;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += PPPOE_SES_HLEN;
		switch (hdr->proto) {
		case htons(PPP_IP):
			proto = htons(ETH_P_IP);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		case htons(PPP_IPV6):
			proto = htons(ETH_P_IPV6);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		default:
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}
		break;
	}
	case htons(ETH_P_TIPC): {
		struct tipc_basic_hdr *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
					   data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC,
							      target_container);
			key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
		}
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
		fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
						target_container, data,
						nhoff, hlen, mpls_lse,
						&mpls_el);
		nhoff += sizeof(struct mpls_label);
		mpls_lse++;
		break;
	case htons(ETH_P_FCOE):
		if ((hlen - nhoff) < FCOE_HEADER_LEN) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += FCOE_HEADER_LEN;
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;

	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		fdret = __skb_flow_dissect_arp(skb, flow_dissector,
					       target_container, data,
					       nhoff, hlen);
		break;

	case htons(ETH_P_BATMAN):
		fdret = __skb_flow_dissect_batadv(skb, key_control, data,
						  &proto, &nhoff, hlen, flags);
		break;

	case htons(ETH_P_1588): {
		struct ptp_header *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
					   hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += ntohs(hdr->message_length);
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}

	case htons(ETH_P_PRP):
	case htons(ETH_P_HSR): {
		struct hsr_tag *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen,
					   &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		proto = hdr->encap_proto;
		nhoff += HSR_HLEN;
		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;
	}

	default:
		fdret = FLOW_DISSECT_RET_OUT_BAD;
		break;
	}

	/* Process result of proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_OUT_GOOD:
		goto out_good;
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		goto out_good;
	case FLOW_DISSECT_RET_CONTINUE:
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

ip_proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (ip_proto) {
	case IPPROTO_GRE:
		if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
					       target_container, data,
					       &proto, &nhoff, &hlen, flags);
		break;

	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
		break;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);

		if (!fh) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);
		ip_proto = fh->nexthdr;

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
				fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
				break;
			}
		}

		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}
	case IPPROTO_IPIP:
		if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		proto = htons(ETH_P_IP);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_IPV6:
		if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		proto = htons(ETH_P_IPV6);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;


	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_TCP:
		__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
				       data, nhoff, hlen);
		break;

	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		__skb_flow_dissect_icmp(skb, flow_dissector, target_container,
					data, nhoff, hlen);
		break;

	default:
		break;
	}

	if (!(key_control->flags & FLOW_DIS_IS_FRAGMENT))
		__skb_flow_dissect_ports(skb, flow_dissector, target_container,
					 data, nhoff, ip_proto, hlen);

	/* Process result of IP proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		break;
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto ip_proto_again;
		break;
	case FLOW_DISSECT_RET_OUT_GOOD:
	case FLOW_DISSECT_RET_CONTINUE:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

out_good:
	ret = true;

out:
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

	return ret;

out_bad:
	ret = false;
	goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
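
/* Usage sketch (illustrative, not part of the original file): dissecting an
 * skb into a zeroed struct flow_keys with the default dissector, in the same
 * way __skb_get_hash_symmetric() below drives __skb_flow_dissect().
 *
 *	struct flow_keys keys;
 *
 *	memset(&keys, 0, sizeof(keys));
 *	if (__skb_flow_dissect(NULL, skb, &flow_keys_dissector, &keys,
 *			       NULL, 0, 0, 0,
 *			       FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL))
 *		pr_debug("n_proto %x ip_proto %u\n",
 *			 ntohs(keys.basic.n_proto), keys.basic.ip_proto);
 */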

static siphash_aligned_key_t hashrnd;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static const void *flow_keys_hash_start(const struct flow_keys *flow)
{
	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
	return &flow->FLOW_KEYS_HASH_START_FIELD;
}

static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);

	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC:
		diff -= sizeof(flow->addrs.tipckey);
		break;
	}
	return sizeof(*flow) - diff;
}

__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC:
		return flow->addrs.tipckey.key;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);

__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);

/* Sort the source and destination IP and the ports,
 * to have consistent hash within the two directions
 */
static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if (addr_diff < 0)
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);

		if ((__force u16)keys->ports.dst <
		    (__force u16)keys->ports.src) {
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if (addr_diff < 0) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
		}
		if ((__force u16)keys->ports.dst <
		    (__force u16)keys->ports.src) {
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}

static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
					const siphash_key_t *keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = siphash(flow_keys_hash_start(keys),
		       flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, &hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);

static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  const siphash_key_t *keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}

struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};

void make_flow_keys_digest(struct flow_keys_digest *digest,
			   const struct flow_keys *flow)
{
	struct _flow_keys_digest_data *data =
		(struct _flow_keys_digest_data *)digest;

	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));

	memset(digest, 0, sizeof(*digest));

	data->n_proto = flow->basic.n_proto;
	data->ip_proto = flow->basic.ip_proto;
	data->ports = flow->ports.ports;
	data->src = flow->addrs.v4addrs.src;
	data->dst = flow->addrs.v4addrs.dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);

static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;

u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	memset(&keys, 0, sizeof(keys));
	__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
			   &keys, NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(&keys, &hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
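
/* Usage sketch (illustrative, not part of the original file): a caller can
 * also hash a struct flow_keys it filled in by hand, as
 * __get_hash_from_flowi6() does further down in this file. saddr, daddr,
 * sport and dport are hypothetical local variables.
 *
 *	struct flow_keys keys;
 *
 *	memset(&keys, 0, sizeof(keys));
 *	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 *	keys.addrs.v4addrs.src = saddr;
 *	keys.addrs.v4addrs.dst = daddr;
 *	keys.ports.src = sport;
 *	keys.ports.dst = dport;
 *	keys.basic.ip_proto = IPPROTO_UDP;
 *
 *	hash = flow_hash_from_keys(&keys);
 */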

/**
 * __skb_get_hash: calculate a flow hash
 * @skb: sk_buff to calculate flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses
 * and src/dst port numbers. Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash. Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	__flow_hash_secret_init();

	hash = ___skb_get_hash(skb, &keys, &hashrnd);

	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);

__u32 skb_get_hash_perturb(const struct sk_buff *skb,
			   const siphash_key_t *perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);
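
/* Usage sketch (illustrative, not part of the original file): a user of
 * skb_get_hash_perturb() supplies its own siphash key so that the result
 * cannot be correlated with skb->hash; the key would typically be generated
 * once, e.g. with get_random_bytes().
 *
 *	siphash_key_t perturb;
 *	u32 hash;
 *
 *	get_random_bytes(&perturb, sizeof(perturb));
 *	hash = skb_get_hash_perturb(skb, &perturb);
 */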

u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
		   const struct flow_keys_basic *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	/* skip L4 headers for fragments after the first */
	if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
	    !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
		return poff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}

/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * The function will get the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push the actual payload to user
 * space and can instead analyze headers only.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					      NULL, 0, 0, 0, 0))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}

__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys->addrs.v6addrs.src));
	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys->addrs.v6addrs.dst));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->ports.src = fl6->fl6_sport;
	keys->ports.dst = fl6->fl6_dport;
	keys->keyid.keyid = fl6->fl6_gre_key;
	keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
	keys->basic.ip_proto = fl6->flowi6_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);

static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};

static const struct flow_dissector_key flow_keys_basic_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};

struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_basic_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_basic_dissector);

static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
				flow_keys_dissector_symmetric_keys,
				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
	skb_flow_dissector_init(&flow_keys_basic_dissector,
				flow_keys_basic_dissector_keys,
				ARRAY_SIZE(flow_keys_basic_dissector_keys));
	return 0;
}
core_initcall(init_default_flow_dissectors);