nf_conntrack_netlink.c (96976B)
/* Connection tracking via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>
 * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
 * (C) 2003 by Patrick McHardy <kaber@trash.net>
 * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial connection tracking via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/rculist_nulls.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/siphash.h>

#include <linux/netfilter.h>
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#endif

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

#include "nf_internals.h"

MODULE_LICENSE("GPL");

struct ctnetlink_list_dump_ctx {
	struct nf_conn *last;
	unsigned int cpu;
	bool done;
};

static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
				       const struct nf_conntrack_tuple *tuple,
				       const struct nf_conntrack_l4proto *l4proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO);
	if (!nest_parms)
		goto nla_put_failure;
	if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
		goto nla_put_failure;

	if (likely(l4proto->tuple_to_nlattr))
		ret = l4proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
	    nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
		return -EMSGSIZE;
	return 0;
}

static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_in6_addr(skb, CTA_IP_V6_SRC, &tuple->src.u3.in6) ||
	    nla_put_in6_addr(skb, CTA_IP_V6_DST, &tuple->dst.u3.in6))
		return -EMSGSIZE;
	return 0;
}

static int ctnetlink_dump_tuples_ip(struct sk_buff *skb,
				    const struct nf_conntrack_tuple *tuple)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_IP);
	if (!nest_parms)
		goto nla_put_failure;

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		ret = ipv4_tuple_to_nlattr(skb, tuple);
		break;
	case NFPROTO_IPV6:
		ret = ipv6_tuple_to_nlattr(skb, tuple);
		break;
	}

	nla_nest_end(skb, nest_parms);

	return ret;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_tuples(struct sk_buff *skb,
				 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_l4proto *l4proto;
	int ret;

	rcu_read_lock();
	ret = ctnetlink_dump_tuples_ip(skb, tuple);

	if (ret >= 0) {
		l4proto = nf_ct_l4proto_find(tuple->dst.protonum);
		ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
	}
	rcu_read_unlock();
	return ret;
}

static int ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
				  const struct nf_conntrack_zone *zone, int dir)
{
	if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
		return 0;
	if (nla_put_be16(skb, attrtype, htons(zone->id)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct,
				  bool skip_zero)
{
	long timeout = nf_ct_expires(ct) / HZ;

	if (skip_zero && timeout == 0)
		return 0;

	if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct,
				    bool destroy)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct nlattr *nest_proto;
	int ret;

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (!l4proto->to_nlattr)
		return 0;

	nest_proto = nla_nest_start(skb, CTA_PROTOINFO);
	if (!nest_proto)
		goto nla_put_failure;

	ret = l4proto->to_nlattr(skb, nest_proto, ct, destroy);

	nla_nest_end(skb, nest_proto);

	return ret;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_helpinfo(struct sk_buff *skb,
				   const struct nf_conn *ct)
{
	struct nlattr *nest_helper;
	const struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (!help)
		return 0;

	rcu_read_lock();
	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	nest_helper = nla_nest_start(skb, CTA_HELP);
	if (!nest_helper)
		goto nla_put_failure;
	if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
		goto nla_put_failure;

	if (helper->to_nlattr)
		helper->to_nlattr(skb, ct);

	nla_nest_end(skb, nest_helper);
out:
	rcu_read_unlock();
	return 0;

nla_put_failure:
	rcu_read_unlock();
	return -1;
}

static int
dump_counters(struct sk_buff *skb, struct nf_conn_acct *acct,
	      enum ip_conntrack_dir dir, int type)
{
	enum ctattr_type attr = dir ? CTA_COUNTERS_REPLY : CTA_COUNTERS_ORIG;
	struct nf_conn_counter *counter = acct->counter;
	struct nlattr *nest_count;
	u64 pkts, bytes;

	if (type == IPCTNL_MSG_CT_GET_CTRZERO) {
		pkts = atomic64_xchg(&counter[dir].packets, 0);
		bytes = atomic64_xchg(&counter[dir].bytes, 0);
	} else {
		pkts = atomic64_read(&counter[dir].packets);
		bytes = atomic64_read(&counter[dir].bytes);
	}

	nest_count = nla_nest_start(skb, attr);
	if (!nest_count)
		goto nla_put_failure;

	if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),
			 CTA_COUNTERS_PAD) ||
	    nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes),
			 CTA_COUNTERS_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

static int
ctnetlink_dump_acct(struct sk_buff *skb, const struct nf_conn *ct, int type)
{
	struct nf_conn_acct *acct = nf_conn_acct_find(ct);

	if (!acct)
		return 0;

	if (dump_counters(skb, acct, IP_CT_DIR_ORIGINAL, type) < 0)
		return -1;
	if (dump_counters(skb, acct, IP_CT_DIR_REPLY, type) < 0)
		return -1;

	return 0;
}

static int
ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_count;
	const struct nf_conn_tstamp *tstamp;

	tstamp = nf_conn_tstamp_find(ct);
	if (!tstamp)
		return 0;

	nest_count = nla_nest_start(skb, CTA_TIMESTAMP);
	if (!nest_count)
		goto nla_put_failure;

	if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start),
			 CTA_TIMESTAMP_PAD) ||
	    (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
					       cpu_to_be64(tstamp->stop),
					       CTA_TIMESTAMP_PAD)))
		goto nla_put_failure;
	nla_nest_end(skb, nest_count);

	return 0;

nla_put_failure:
	return -1;
}

#ifdef CONFIG_NF_CONNTRACK_MARK
static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
#else
#define ctnetlink_dump_mark(a, b) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
static int ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_secctx;
	int len, ret;
	char *secctx;

	ret = security_secid_to_secctx(ct->secmark, &secctx, &len);
	if (ret)
		return 0;

	ret = -1;
	nest_secctx = nla_nest_start(skb, CTA_SECCTX);
	if (!nest_secctx)
		goto nla_put_failure;

	if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
		goto nla_put_failure;
	nla_nest_end(skb, nest_secctx);

	ret = 0;
nla_put_failure:
	security_release_secctx(secctx, len);
	return ret;
}
#else
#define ctnetlink_dump_secctx(a, b) (0)
#endif

#ifdef CONFIG_NF_CONNTRACK_LABELS
static inline int ctnetlink_label_size(const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);

	if (!labels)
		return 0;
	return nla_total_size(sizeof(labels->bits));
}

static int
ctnetlink_dump_labels(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nf_conn_labels *labels = nf_ct_labels_find(ct);
	unsigned int i;

	if (!labels)
		return 0;

	i = 0;
	do {
		if (labels->bits[i] != 0)
			return nla_put(skb, CTA_LABELS, sizeof(labels->bits),
				       labels->bits);
		i++;
	} while (i < ARRAY_SIZE(labels->bits));

	return 0;
}
#else
#define ctnetlink_dump_labels(a, b) (0)
#define ctnetlink_label_size(a) (0)
#endif

#define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)

static int ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_parms;

	if (!(ct->status & IPS_EXPECTED))
		return 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int
dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type);
	if (!nest_parms)
		goto nla_put_failure;

	if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
			 htonl(seq->correction_pos)) ||
	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
			 htonl(seq->offset_before)) ||
	    nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
			 htonl(seq->offset_after)))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
	struct nf_ct_seqadj *seq;

	if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
		return 0;

	spin_lock_bh(&ct->lock);
	seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
		goto err;

	seq = &seqadj->seq[IP_CT_DIR_REPLY];
	if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
		goto err;

	spin_unlock_bh(&ct->lock);
	return 0;
err:
	spin_unlock_bh(&ct->lock);
	return -1;
}

static int ctnetlink_dump_ct_synproxy(struct sk_buff *skb, struct nf_conn *ct)
{
	struct nf_conn_synproxy *synproxy = nfct_synproxy(ct);
	struct nlattr *nest_parms;

	if (!synproxy)
		return 0;

	nest_parms = nla_nest_start(skb, CTA_SYNPROXY);
	if (!nest_parms)
		goto nla_put_failure;

	if (nla_put_be32(skb, CTA_SYNPROXY_ISN, htonl(synproxy->isn)) ||
	    nla_put_be32(skb, CTA_SYNPROXY_ITS, htonl(synproxy->its)) ||
	    nla_put_be32(skb, CTA_SYNPROXY_TSOFF, htonl(synproxy->tsoff)))
		goto nla_put_failure;

	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
	__be32 id = (__force __be32)nf_ct_get_id(ct);

	if (nla_put_be32(skb, CTA_ID, id))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
	if (nla_put_be32(skb, CTA_USE, htonl(refcount_read(&ct->ct_general.use))))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

/* all these functions access ct->ext. Caller must either hold a reference
 * on ct or prevent its deletion by holding either the bucket spinlock or
 * pcpu dying list lock.
 */
static int ctnetlink_dump_extinfo(struct sk_buff *skb,
				  struct nf_conn *ct, u32 type)
{
	if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
	    ctnetlink_dump_labels(skb, ct) < 0 ||
	    ctnetlink_dump_ct_seq_adj(skb, ct) < 0 ||
	    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
		return -1;

	return 0;
}

static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
{
	if (ctnetlink_dump_status(skb, ct) < 0 ||
	    ctnetlink_dump_mark(skb, ct) < 0 ||
	    ctnetlink_dump_secctx(skb, ct) < 0 ||
	    ctnetlink_dump_id(skb, ct) < 0 ||
	    ctnetlink_dump_use(skb, ct) < 0 ||
	    ctnetlink_dump_master(skb, ct) < 0)
		return -1;

	if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
	    (ctnetlink_dump_timeout(skb, ct, false) < 0 ||
	     ctnetlink_dump_protoinfo(skb, ct, false) < 0))
		return -1;

	return 0;
}

static int
ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
		    struct nf_conn *ct, bool extinfo, unsigned int flags)
{
	const struct nf_conntrack_zone *zone;
	struct nlmsghdr *nlh;
	struct nlattr *nest_parms;
	unsigned int event;

	if (portid)
		flags |= NLM_F_MULTI;
	event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_NEW);
	nlh = nfnl_msg_put(skb, portid, seq, event, flags, nf_ct_l3num(ct),
			   NFNETLINK_V0, 0);
	if (!nlh)
		goto nlmsg_failure;

	zone = nf_ct_zone(ct);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_ORIG) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_REPL) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
				   NF_CT_DEFAULT_ZONE_DIR) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_info(skb, ct) < 0)
		goto nla_put_failure;
	if (extinfo && ctnetlink_dump_extinfo(skb, ct, type) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}

static const struct nla_policy cta_ip_nla_policy[CTA_IP_MAX + 1] = {
	[CTA_IP_V4_SRC] = { .type = NLA_U32 },
	[CTA_IP_V4_DST] = { .type = NLA_U32 },
	[CTA_IP_V6_SRC] = { .len = sizeof(__be32) * 4 },
	[CTA_IP_V6_DST] = { .len = sizeof(__be32) * 4 },
};

#if defined(CONFIG_NETFILTER_NETLINK_GLUE_CT) || defined(CONFIG_NF_CONNTRACK_EVENTS)
static size_t ctnetlink_proto_size(const struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	size_t len, len4 = 0;

	len = nla_policy_len(cta_ip_nla_policy, CTA_IP_MAX + 1);
	len *= 3u; /* ORIG, REPLY, MASTER */

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	len += l4proto->nlattr_size;
	if (l4proto->nlattr_tuple_size) {
		len4 = l4proto->nlattr_tuple_size();
		len4 *= 3u; /* ORIG, REPLY, MASTER */
	}

	return len + len4;
}
#endif

static inline size_t ctnetlink_acct_size(const struct nf_conn *ct)
{
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_ACCT))
		return 0;
	return 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
	       + 2 * nla_total_size_64bit(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
	       ;
}

static inline int ctnetlink_secctx_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_SECMARK
	int len, ret;

	ret = security_secid_to_secctx(ct->secmark, NULL, &len);
	if (ret)
		return 0;

	return nla_total_size(0) /* CTA_SECCTX */
	       + nla_total_size(sizeof(char) * len); /* CTA_SECCTX_NAME */
#else
	return 0;
#endif
}

static inline size_t ctnetlink_timestamp_size(const struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
		return 0;
	return nla_total_size(0) + 2 * nla_total_size_64bit(sizeof(uint64_t));
#else
	return 0;
#endif
}

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static size_t ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
	       + ctnetlink_acct_size(ct)
	       + ctnetlink_timestamp_size(ct)
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
	       + ctnetlink_secctx_size(ct)
#if IS_ENABLED(CONFIG_NF_NAT)
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
#endif
	       + ctnetlink_proto_size(ct)
	       + ctnetlink_label_size(ct)
	       ;
}

static int
ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
{
	const struct nf_conntrack_zone *zone;
	struct net *net;
	struct nlmsghdr *nlh;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	net = nf_ct_net(ct);
	if (!item->report && !nfnetlink_has_listeners(net, group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, type);
	nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, nf_ct_l3num(ct),
			   NFNETLINK_V0, 0);
	if (!nlh)
		goto nlmsg_failure;

	zone = nf_ct_zone(ct);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_ORIG) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
				   NF_CT_ZONE_DIR_REPL) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
				   NF_CT_DEFAULT_ZONE_DIR) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (events & (1 << IPCT_DESTROY)) {
		if (ctnetlink_dump_timeout(skb, ct, true) < 0)
			goto nla_put_failure;

		if (ctnetlink_dump_acct(skb, ct, type) < 0 ||
		    ctnetlink_dump_timestamp(skb, ct) < 0 ||
		    ctnetlink_dump_protoinfo(skb, ct, true) < 0)
			goto nla_put_failure;
	} else {
		if (ctnetlink_dump_timeout(skb, ct, false) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO) &&
		    ctnetlink_dump_protoinfo(skb, ct, false) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secctx(skb, ct) < 0)
			goto nla_put_failure;
#endif
		if (events & (1 << IPCT_LABEL) &&
		    ctnetlink_dump_labels(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_SEQADJ) &&
		    ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_SYNPROXY) &&
		    ctnetlink_dump_ct_synproxy(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((events & (1 << IPCT_MARK) || ct->mark)
	    && ctnetlink_dump_mark(skb, ct) < 0)
		goto nla_put_failure;
#endif
	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, net, item->portid, group, item->report,
			     GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	if (nfnetlink_set_err(net, 0, group, -ENOBUFS) > 0)
		return -ENOBUFS;

	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */

static int ctnetlink_done(struct netlink_callback *cb)
{
	if (cb->args[1])
		nf_ct_put((struct nf_conn *)cb->args[1]);
	kfree(cb->data);
	return 0;
}

struct ctnetlink_filter_u32 {
	u32 val;
	u32 mask;
};

struct ctnetlink_filter {
	u8 family;

	u_int32_t orig_flags;
	u_int32_t reply_flags;

	struct nf_conntrack_tuple orig;
	struct nf_conntrack_tuple reply;
	struct nf_conntrack_zone zone;

	struct ctnetlink_filter_u32 mark;
	struct ctnetlink_filter_u32 status;
};

static const struct nla_policy cta_filter_nla_policy[CTA_FILTER_MAX + 1] = {
	[CTA_FILTER_ORIG_FLAGS] = { .type = NLA_U32 },
	[CTA_FILTER_REPLY_FLAGS] = { .type = NLA_U32 },
};

static int ctnetlink_parse_filter(const struct nlattr *attr,
				  struct ctnetlink_filter *filter)
{
	struct nlattr *tb[CTA_FILTER_MAX + 1];
	int ret = 0;

	ret = nla_parse_nested(tb, CTA_FILTER_MAX, attr, cta_filter_nla_policy,
			       NULL);
	if (ret)
		return ret;

	if (tb[CTA_FILTER_ORIG_FLAGS]) {
		filter->orig_flags = nla_get_u32(tb[CTA_FILTER_ORIG_FLAGS]);
		if (filter->orig_flags & ~CTA_FILTER_F_ALL)
			return -EOPNOTSUPP;
	}

	if (tb[CTA_FILTER_REPLY_FLAGS]) {
		filter->reply_flags = nla_get_u32(tb[CTA_FILTER_REPLY_FLAGS]);
		if (filter->reply_flags & ~CTA_FILTER_F_ALL)
			return -EOPNOTSUPP;
	}

	return 0;
}

static int ctnetlink_parse_zone(const struct nlattr *attr,
				struct nf_conntrack_zone *zone);
static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
					struct nf_conntrack_tuple *tuple,
					u32 type, u_int8_t l3num,
					struct nf_conntrack_zone *zone,
					u_int32_t flags);

static int ctnetlink_filter_parse_mark(struct ctnetlink_filter_u32 *mark,
				       const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_CONNTRACK_MARK
	if (cda[CTA_MARK]) {
		mark->val = ntohl(nla_get_be32(cda[CTA_MARK]));

		if (cda[CTA_MARK_MASK])
			mark->mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
		else
			mark->mask = 0xffffffff;
	} else if (cda[CTA_MARK_MASK]) {
		return -EINVAL;
	}
#endif
	return 0;
}

static int ctnetlink_filter_parse_status(struct ctnetlink_filter_u32 *status,
					 const struct nlattr * const cda[])
{
	if (cda[CTA_STATUS]) {
		status->val = ntohl(nla_get_be32(cda[CTA_STATUS]));
		if (cda[CTA_STATUS_MASK])
			status->mask = ntohl(nla_get_be32(cda[CTA_STATUS_MASK]));
		else
			status->mask = status->val;

		/* status->val == 0? always true, else always false. */
		if (status->mask == 0)
			return -EINVAL;
	} else if (cda[CTA_STATUS_MASK]) {
		return -EINVAL;
	}

	/* CTA_STATUS is NLA_U32, if this fires UAPI needs to be extended */
	BUILD_BUG_ON(__IPS_MAX_BIT >= 32);
	return 0;
}

static struct ctnetlink_filter *
ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
{
	struct ctnetlink_filter *filter;
	int err;

#ifndef CONFIG_NF_CONNTRACK_MARK
	if (cda[CTA_MARK] || cda[CTA_MARK_MASK])
		return ERR_PTR(-EOPNOTSUPP);
#endif

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (filter == NULL)
		return ERR_PTR(-ENOMEM);

	filter->family = family;

	err = ctnetlink_filter_parse_mark(&filter->mark, cda);
	if (err)
		goto err_filter;

	err = ctnetlink_filter_parse_status(&filter->status, cda);
	if (err)
		goto err_filter;

	if (!cda[CTA_FILTER])
		return filter;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
	if (err < 0)
		goto err_filter;

	err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
	if (err < 0)
		goto err_filter;

	if (filter->orig_flags) {
		if (!cda[CTA_TUPLE_ORIG]) {
			err = -EINVAL;
			goto err_filter;
		}

		err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
						   CTA_TUPLE_ORIG,
						   filter->family,
						   &filter->zone,
						   filter->orig_flags);
		if (err < 0)
			goto err_filter;
	}

	if (filter->reply_flags) {
		if (!cda[CTA_TUPLE_REPLY]) {
			err = -EINVAL;
			goto err_filter;
		}

		err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
						   CTA_TUPLE_REPLY,
						   filter->family,
						   &filter->zone,
						   filter->reply_flags);
		if (err < 0)
			goto err_filter;
	}

	return filter;

err_filter:
	kfree(filter);

	return ERR_PTR(err);
}

static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
{
	return family || cda[CTA_MARK] || cda[CTA_FILTER] || cda[CTA_STATUS];
}

static int ctnetlink_start(struct netlink_callback *cb)
{
	const struct nlattr * const *cda = cb->data;
	struct ctnetlink_filter *filter = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u8 family = nfmsg->nfgen_family;

	if (ctnetlink_needs_filter(family, cda)) {
		filter = ctnetlink_alloc_filter(cda, family);
		if (IS_ERR(filter))
			return PTR_ERR(filter);
	}

	cb->data = filter;
	return 0;
}

static int ctnetlink_filter_match_tuple(struct nf_conntrack_tuple *filter_tuple,
					struct nf_conntrack_tuple *ct_tuple,
					u_int32_t flags, int family)
{
	switch (family) {
	case NFPROTO_IPV4:
		if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
		    filter_tuple->src.u3.ip != ct_tuple->src.u3.ip)
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
		    filter_tuple->dst.u3.ip != ct_tuple->dst.u3.ip)
			return 0;
		break;
	case NFPROTO_IPV6:
		if ((flags & CTA_FILTER_FLAG(CTA_IP_SRC)) &&
		    !ipv6_addr_cmp(&filter_tuple->src.u3.in6,
				   &ct_tuple->src.u3.in6))
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_IP_DST)) &&
		    !ipv6_addr_cmp(&filter_tuple->dst.u3.in6,
				   &ct_tuple->dst.u3.in6))
			return 0;
		break;
	}

	if ((flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) &&
	    filter_tuple->dst.protonum != ct_tuple->dst.protonum)
		return 0;

	switch (ct_tuple->dst.protonum) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_SRC_PORT)) &&
		    filter_tuple->src.u.tcp.port != ct_tuple->src.u.tcp.port)
			return 0;

		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_DST_PORT)) &&
		    filter_tuple->dst.u.tcp.port != ct_tuple->dst.u.tcp.port)
			return 0;
		break;
	case IPPROTO_ICMP:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_TYPE)) &&
		    filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_CODE)) &&
		    filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMP_ID)) &&
		    filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
			return 0;
		break;
	case IPPROTO_ICMPV6:
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_TYPE)) &&
		    filter_tuple->dst.u.icmp.type != ct_tuple->dst.u.icmp.type)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_CODE)) &&
		    filter_tuple->dst.u.icmp.code != ct_tuple->dst.u.icmp.code)
			return 0;
		if ((flags & CTA_FILTER_FLAG(CTA_PROTO_ICMPV6_ID)) &&
		    filter_tuple->src.u.icmp.id != ct_tuple->src.u.icmp.id)
			return 0;
		break;
	}

	return 1;
}

static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
{
	struct ctnetlink_filter *filter = data;
	struct nf_conntrack_tuple *tuple;
	u32 status;

	if (filter == NULL)
		goto out;

	/* Match entries of a given L3 protocol number.
	 * If it is not specified, ie. l3proto == 0,
	 * then match everything.
	 */
	if (filter->family && nf_ct_l3num(ct) != filter->family)
		goto ignore_entry;

	if (filter->orig_flags) {
		tuple = nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL);
		if (!ctnetlink_filter_match_tuple(&filter->orig, tuple,
						  filter->orig_flags,
						  filter->family))
			goto ignore_entry;
	}

	if (filter->reply_flags) {
		tuple = nf_ct_tuple(ct, IP_CT_DIR_REPLY);
		if (!ctnetlink_filter_match_tuple(&filter->reply, tuple,
						  filter->reply_flags,
						  filter->family))
			goto ignore_entry;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((ct->mark & filter->mark.mask) != filter->mark.val)
		goto ignore_entry;
#endif
	status = (u32)READ_ONCE(ct->status);
	if ((status & filter->status.mask) != filter->status.val)
		goto ignore_entry;

out:
	return 1;

ignore_entry:
	return 0;
}

static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned int flags = cb->data ? NLM_F_DUMP_FILTERED : 0;
	struct net *net = sock_net(skb->sk);
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nf_conn *nf_ct_evict[8];
	int res, i;
	spinlock_t *lockp;

	last = (struct nf_conn *)cb->args[1];
	i = 0;

	local_bh_disable();
	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
		while (i) {
			i--;
			if (nf_ct_should_gc(nf_ct_evict[i]))
				nf_ct_kill(nf_ct_evict[i]);
			nf_ct_put(nf_ct_evict[i]);
		}

		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
		nf_conntrack_lock(lockp);
		if (cb->args[0] >= nf_conntrack_htable_size) {
			spin_unlock(lockp);
			goto out;
		}
		hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[cb->args[0]],
					   hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (nf_ct_is_expired(ct)) {
				if (i < ARRAY_SIZE(nf_ct_evict) &&
				    refcount_inc_not_zero(&ct->ct_general.use))
					nf_ct_evict[i++] = ct;
				continue;
			}

			if (!net_eq(net, nf_ct_net(ct)))
				continue;

			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;

			if (cb->args[1]) {
				if (ct != last)
					continue;
				cb->args[1] = 0;
			}
			if (!ctnetlink_filter_match(ct, cb->data))
				continue;

			res =
			ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
					    cb->nlh->nlmsg_seq,
					    NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
					    ct, true, flags);
			if (res < 0) {
				nf_conntrack_get(&ct->ct_general);
				cb->args[1] = (unsigned long)ct;
				spin_unlock(lockp);
				goto out;
			}
		}
		spin_unlock(lockp);
		if (cb->args[1]) {
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	local_bh_enable();
	if (last) {
		/* nf ct hash resize happened, now clear the leftover. */
		if ((struct nf_conn *)cb->args[1] == last)
			cb->args[1] = 0;

		nf_ct_put(last);
	}

	while (i) {
		i--;
		if (nf_ct_should_gc(nf_ct_evict[i]))
			nf_ct_kill(nf_ct_evict[i]);
		nf_ct_put(nf_ct_evict[i]);
	}

	return skb->len;
}

static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
				struct nf_conntrack_tuple *t,
				u_int32_t flags)
{
	if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_IP_V4_SRC])
			return -EINVAL;

		t->src.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_SRC]);
	}

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
		if (!tb[CTA_IP_V4_DST])
			return -EINVAL;

		t->dst.u3.ip = nla_get_in_addr(tb[CTA_IP_V4_DST]);
	}

	return 0;
}

static int ipv6_nlattr_to_tuple(struct nlattr *tb[],
				struct nf_conntrack_tuple *t,
				u_int32_t flags)
{
	if (flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_IP_V6_SRC])
			return -EINVAL;

		t->src.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_SRC]);
	}

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST)) {
		if (!tb[CTA_IP_V6_DST])
			return -EINVAL;

		t->dst.u3.in6 = nla_get_in6_addr(tb[CTA_IP_V6_DST]);
	}

	return 0;
}

static int ctnetlink_parse_tuple_ip(struct nlattr *attr,
				    struct nf_conntrack_tuple *tuple,
				    u_int32_t flags)
{
	struct nlattr *tb[CTA_IP_MAX+1];
	int ret = 0;

	ret = nla_parse_nested_deprecated(tb, CTA_IP_MAX, attr, NULL, NULL);
	if (ret < 0)
		return ret;

	ret = nla_validate_nested_deprecated(attr, CTA_IP_MAX,
					     cta_ip_nla_policy, NULL);
	if (ret)
		return ret;

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		ret = ipv4_nlattr_to_tuple(tb, tuple, flags);
		break;
	case NFPROTO_IPV6:
		ret = ipv6_nlattr_to_tuple(tb, tuple, flags);
		break;
	}

	return ret;
}

static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_NUM] = { .type = NLA_U8 },
};

static int ctnetlink_parse_tuple_proto(struct nlattr *attr,
				       struct nf_conntrack_tuple *tuple,
				       u_int32_t flags)
{
	const struct nf_conntrack_l4proto *l4proto;
	struct nlattr *tb[CTA_PROTO_MAX+1];
	int ret = 0;

	ret = nla_parse_nested_deprecated(tb, CTA_PROTO_MAX, attr,
					  proto_nla_policy, NULL);
	if (ret < 0)
		return ret;

	if (!(flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)))
		return 0;

	if (!tb[CTA_PROTO_NUM])
		return -EINVAL;

	tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);

	rcu_read_lock();
	l4proto = nf_ct_l4proto_find(tuple->dst.protonum);

	if (likely(l4proto->nlattr_to_tuple)) {
		ret = nla_validate_nested_deprecated(attr, CTA_PROTO_MAX,
						     l4proto->nla_policy,
						     NULL);
		if (ret == 0)
			ret = l4proto->nlattr_to_tuple(tb, tuple, flags);
	}

	rcu_read_unlock();

	return ret;
}

static int
ctnetlink_parse_zone(const struct nlattr *attr,
		     struct nf_conntrack_zone *zone)
{
	nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
			NF_CT_DEFAULT_ZONE_DIR, 0);
#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (attr)
		zone->id = ntohs(nla_get_be16(attr));
#else
	if (attr)
		return -EOPNOTSUPP;
#endif
	return 0;
}

static int
ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
			   struct nf_conntrack_zone *zone)
{
	int ret;

	if (zone->id != NF_CT_DEFAULT_ZONE_ID)
		return -EINVAL;

	ret = ctnetlink_parse_zone(attr, zone);
	if (ret < 0)
		return ret;

	if (type == CTA_TUPLE_REPLY)
		zone->dir = NF_CT_ZONE_DIR_REPL;
	else
		zone->dir = NF_CT_ZONE_DIR_ORIG;

	return 0;
}

static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
	[CTA_TUPLE_IP] = { .type = NLA_NESTED },
	[CTA_TUPLE_PROTO] = { .type = NLA_NESTED },
	[CTA_TUPLE_ZONE] = { .type = NLA_U16 },
};

#define CTA_FILTER_F_ALL_CTA_PROTO \
  (CTA_FILTER_F_CTA_PROTO_SRC_PORT | \
   CTA_FILTER_F_CTA_PROTO_DST_PORT | \
   CTA_FILTER_F_CTA_PROTO_ICMP_TYPE | \
   CTA_FILTER_F_CTA_PROTO_ICMP_CODE | \
   CTA_FILTER_F_CTA_PROTO_ICMP_ID | \
   CTA_FILTER_F_CTA_PROTO_ICMPV6_TYPE | \
   CTA_FILTER_F_CTA_PROTO_ICMPV6_CODE | \
   CTA_FILTER_F_CTA_PROTO_ICMPV6_ID)

static int
ctnetlink_parse_tuple_filter(const struct nlattr * const cda[],
			     struct nf_conntrack_tuple *tuple, u32 type,
			     u_int8_t l3num, struct nf_conntrack_zone *zone,
			     u_int32_t flags)
{
	struct nlattr *tb[CTA_TUPLE_MAX+1];
	int err;

	memset(tuple, 0, sizeof(*tuple));

	err = nla_parse_nested_deprecated(tb, CTA_TUPLE_MAX, cda[type],
					  tuple_nla_policy, NULL);
	if (err < 0)
		return err;

	if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6)
		return -EOPNOTSUPP;
	tuple->src.l3num = l3num;

	if (flags & CTA_FILTER_FLAG(CTA_IP_DST) ||
	    flags & CTA_FILTER_FLAG(CTA_IP_SRC)) {
		if (!tb[CTA_TUPLE_IP])
			return -EINVAL;

		err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple, flags);
		if (err < 0)
			return err;
	}

	if (flags & CTA_FILTER_FLAG(CTA_PROTO_NUM)) {
		if (!tb[CTA_TUPLE_PROTO])
			return -EINVAL;

		err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple, flags);
		if (err < 0)
			return err;
	} else if (flags & CTA_FILTER_FLAG(ALL_CTA_PROTO)) {
		/* Can't manage proto flags without a protonum */
		return -EINVAL;
	}

	if ((flags & CTA_FILTER_FLAG(CTA_TUPLE_ZONE)) && tb[CTA_TUPLE_ZONE]) {
		if (!zone)
			return -EINVAL;

		err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
						 type, zone);
		if (err < 0)
			return err;
	}

	/* orig and expect tuples get DIR_ORIGINAL */
	if (type == CTA_TUPLE_REPLY)
		tuple->dst.dir = IP_CT_DIR_REPLY;
	else
		tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return 0;
}

static int
ctnetlink_parse_tuple(const struct nlattr * const cda[],
		      struct nf_conntrack_tuple *tuple, u32 type,
		      u_int8_t l3num, struct nf_conntrack_zone *zone)
{
	return ctnetlink_parse_tuple_filter(cda, tuple, type, l3num, zone,
					    CTA_FILTER_FLAG(ALL));
}

static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
	[CTA_HELP_NAME] = { .type = NLA_NUL_STRING,
			    .len = NF_CT_HELPER_NAME_LEN - 1 },
};

static int ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
				struct nlattr **helpinfo)
{
	int err;
	struct nlattr *tb[CTA_HELP_MAX+1];

	err = nla_parse_nested_deprecated(tb, CTA_HELP_MAX, attr,
					  help_nla_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[CTA_HELP_NAME])
		return -EINVAL;

	*helper_name = nla_data(tb[CTA_HELP_NAME]);

	if (tb[CTA_HELP_INFO])
		*helpinfo = tb[CTA_HELP_INFO];

	return 0;
}

static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_TUPLE_ORIG] = { .type = NLA_NESTED },
	[CTA_TUPLE_REPLY] = { .type = NLA_NESTED },
	[CTA_STATUS] = { .type = NLA_U32 },
	[CTA_PROTOINFO] = { .type = NLA_NESTED },
	[CTA_HELP] = { .type = NLA_NESTED },
	[CTA_NAT_SRC] = { .type = NLA_NESTED },
	[CTA_TIMEOUT] = { .type = NLA_U32 },
	[CTA_MARK] = { .type = NLA_U32 },
	[CTA_ID] = { .type = NLA_U32 },
	[CTA_NAT_DST] = { .type = NLA_NESTED },
	[CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_ORIG] = { .type = NLA_NESTED },
	[CTA_NAT_SEQ_ADJ_REPLY] = { .type = NLA_NESTED },
	[CTA_ZONE] = { .type = NLA_U16 },
	[CTA_MARK_MASK] = { .type = NLA_U32 },
	[CTA_LABELS] = { .type = NLA_BINARY,
			 .len = NF_CT_LABELS_MAX_SIZE },
	[CTA_LABELS_MASK] = { .type = NLA_BINARY,
			      .len = NF_CT_LABELS_MAX_SIZE },
	[CTA_FILTER] = { .type = NLA_NESTED },
	[CTA_STATUS_MASK] = { .type = NLA_U32 },
};

static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data)
{
	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
		return 0;

	return ctnetlink_filter_match(ct, data);
}

static int ctnetlink_flush_conntrack(struct net *net,
				     const struct nlattr * const cda[],
				     u32 portid, int report, u8 family)
{
	struct ctnetlink_filter *filter = NULL;
	struct nf_ct_iter_data iter = {
		.net = net,
		.portid = portid,
		.report = report,
	};

	if (ctnetlink_needs_filter(family, cda)) {
		if (cda[CTA_FILTER])
			return -EOPNOTSUPP;

		filter = ctnetlink_alloc_filter(cda, family);
		if (IS_ERR(filter))
			return PTR_ERR(filter);

		iter.data = filter;
	}

	nf_ct_iterate_cleanup_net(ctnetlink_flush_iterate, &iter);
	kfree(filter);

	return 0;
}

static int ctnetlink_del_conntrack(struct sk_buff *skb,
				   const struct nfnl_info *info,
				   const struct nlattr * const cda[])
{
	u8 family = info->nfmsg->nfgen_family;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_zone zone;
	struct nf_conn *ct;
	int err;

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
					    family, &zone);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
					    family, &zone);
	else {
		u_int8_t u3 = info->nfmsg->version ? family : AF_UNSPEC;

		return ctnetlink_flush_conntrack(info->net, cda,
						 NETLINK_CB(skb).portid,
						 nlmsg_report(info->nlh), u3);
	}

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(info->net, &zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) {
		nf_ct_put(ct);
		return -EBUSY;
	}

	if (cda[CTA_ID]) {
		__be32 id = nla_get_be32(cda[CTA_ID]);

		if (id != (__force __be32)nf_ct_get_id(ct)) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}

	nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(info->nlh));
	nf_ct_put(ct);

	return 0;
}

static int ctnetlink_get_conntrack(struct sk_buff *skb,
				   const struct nfnl_info *info,
				   const struct nlattr * const cda[])
{
	u_int8_t u3 = info->nfmsg->nfgen_family;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_zone zone;
	struct sk_buff *skb2;
	struct nf_conn *ct;
	int err;

	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.start = ctnetlink_start,
			.dump = ctnetlink_dump_table,
			.done = ctnetlink_done,
			.data = (void *)cda,
		};

		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
	if (err < 0)
		return err;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
					    u3, &zone);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
					    u3, &zone);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(info->net, &zone, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb2) {
		nf_ct_put(ct);
		return -ENOMEM;
	}

	err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).portid,
				  info->nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(info->nlh->nlmsg_type), ct,
				  true, 0);
	nf_ct_put(ct);
	if (err <= 0) {
		kfree_skb(skb2);
		return -ENOMEM;
	}

	return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid);
}

static int ctnetlink_done_list(struct netlink_callback *cb)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;

	if (ctx->last)
		nf_ct_put(ctx->last);

	return 0;
}

#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int ctnetlink_dump_one_entry(struct sk_buff *skb,
				    struct netlink_callback *cb,
				    struct nf_conn *ct,
				    bool dying)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u8 l3proto = nfmsg->nfgen_family;
	int res;

	if (l3proto && nf_ct_l3num(ct) != l3proto)
		return 0;

	if (ctx->last) {
		if (ct != ctx->last)
			return 0;

		ctx->last = NULL;
	}

	/* We can't dump extension info for the unconfirmed
	 * list because unconfirmed conntracks can have
	 * ct->ext reallocated (and thus freed).
	 *
	 * In the dying list case ct->ext can't be free'd
	 * until after we drop pcpu->lock.
	 */
	res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq,
				  NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
				  ct, dying, 0);
	if (res < 0) {
		if (!refcount_inc_not_zero(&ct->ct_general.use))
			return 0;

		ctx->last = ct;
	}

	return res;
}
#endif

static int
ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
{
	return 0;
}

static int
ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
	struct nf_conn *last = ctx->last;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
	const struct net *net = sock_net(skb->sk);
	struct nf_conntrack_net_ecache *ecache_net;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
#endif

	if (ctx->done)
		return 0;

	ctx->last = NULL;

#ifdef CONFIG_NF_CONNTRACK_EVENTS
	ecache_net = nf_conn_pernet_ecache(net);
	spin_lock_bh(&ecache_net->dying_lock);

	hlist_nulls_for_each_entry(h, n, &ecache_net->dying_list, hnnode) {
		struct nf_conn *ct;
		int res;

		ct = nf_ct_tuplehash_to_ctrack(h);
		if (last && last != ct)
			continue;

		res = ctnetlink_dump_one_entry(skb, cb, ct, true);
		if (res < 0) {
			spin_unlock_bh(&ecache_net->dying_lock);
			nf_ct_put(last);
			return skb->len;
		}

		nf_ct_put(last);
		last = NULL;
	}

	spin_unlock_bh(&ecache_net->dying_lock);
#endif
	ctx->done = true;
	nf_ct_put(last);

	return skb->len;
}

static int ctnetlink_get_ct_dying(struct sk_buff *skb,
				  const struct nfnl_info *info,
				  const struct nlattr * const cda[])
{
	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_dying,
			.done = ctnetlink_done_list,
		};
		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	return -EOPNOTSUPP;
}

static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
					const struct nfnl_info *info,
					const struct nlattr * const cda[])
{
	if (info->nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = ctnetlink_dump_unconfirmed,
			.done = ctnetlink_done_list,
		};
		return netlink_dump_start(info->sk, skb, info->nlh, &c);
	}

	return -EOPNOTSUPP;
}

#if IS_ENABLED(CONFIG_NF_NAT)
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
	__must_hold(RCU)
{
	const struct nf_nat_hook *nat_hook;
	int err;

	nat_hook = rcu_dereference(nf_nat_hook);
	if (!nat_hook) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat") < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
		nat_hook = rcu_dereference(nf_nat_hook);
		if (nat_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	err = nat_hook->parse_nat_setup(ct, manip, attr);
	if (err == -EAGAIN) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		nfnl_unlock(NFNL_SUBSYS_CTNETLINK);
		if (request_module("nf-nat-%u", nf_ct_l3num(ct)) < 0) {
			nfnl_lock(NFNL_SUBSYS_CTNETLINK);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock(NFNL_SUBSYS_CTNETLINK);
		rcu_read_lock();
#else
		err = -EOPNOTSUPP;
#endif
	}
	return err;
}
#endif

static void
__ctnetlink_change_status(struct nf_conn *ct, unsigned long on,
			  unsigned long off)
{
	unsigned int bit;

	/* Ignore these unchangeable bits */
	on &= ~IPS_UNCHANGEABLE_MASK;
	off &= ~IPS_UNCHANGEABLE_MASK;

	for (bit = 0; bit < __IPS_MAX_BIT; bit++) {
		if (on & (1 << bit))
			set_bit(bit, &ct->status);
		else if (off & (1 << bit))
			clear_bit(bit, &ct->status);
	}
}

static int
ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
{
	unsigned long d;
	unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
	d = ct->status ^ status;

	if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
		/* unchangeable */
		return -EBUSY;

	if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
		/* SEEN_REPLY bit can only be set */
		return -EBUSY;

	if (d & IPS_ASSURED && !(status & IPS_ASSURED))
		/* ASSURED bit can only be set */
		return -EBUSY;

	__ctnetlink_change_status(ct, status, 0);
	return 0;
}

static int
ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int ret;

	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
		return 0;

	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
					cda[CTA_NAT_DST]);
	if (ret < 0)
		return ret;

	return ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
					 cda[CTA_NAT_SRC]);
#else
	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
		return 0;
	return -EOPNOTSUPP;
#endif
}

static int ctnetlink_change_helper(struct nf_conn *ct,
				   const struct nlattr * const cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname = NULL;
	struct nlattr *helpinfo = NULL;
	int err;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
	if (err < 0)
		return err;

	/* don't change helper of sibling connections */
	if (ct->master) {
		/* If we try to change the helper to the same thing twice,
		 * treat the second attempt as a no-op instead of returning
		 * an error.
		 */
		err = -EBUSY;
		if (help) {
			rcu_read_lock();
			helper = rcu_dereference(help->helper);
			if (helper && !strcmp(helper->name, helpname))
				err = 0;
			rcu_read_unlock();
		}

		return err;
	}

	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			RCU_INIT_POINTER(help->helper, NULL);
		}

		return 0;
	}

	rcu_read_lock();
	helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
					    nf_ct_protonum(ct));
	if (helper == NULL) {
		rcu_read_unlock();
		return -EOPNOTSUPP;
	}

	if (help) {
		if (help->helper == helper) {
			/* update private helper data if allowed. */
			if (helper->from_nlattr)
				helper->from_nlattr(helpinfo, ct);
			err = 0;
		} else
			err = -EBUSY;
	} else {
		/* we cannot set a helper for an existing conntrack */
		err = -EOPNOTSUPP;
	}

	rcu_read_unlock();
	return err;
}

static int ctnetlink_change_timeout(struct nf_conn *ct,
				    const struct nlattr * const cda[])
{
	u64 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ;

	if (timeout > INT_MAX)
		timeout = INT_MAX;
	WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);

	if (test_bit(IPS_DYING_BIT, &ct->status))
		return -ETIME;

	return 0;
}

#if defined(CONFIG_NF_CONNTRACK_MARK)
static void ctnetlink_change_mark(struct nf_conn *ct,
				  const struct nlattr * const cda[])
{
	u32 mark, newmark, mask = 0;

	if (cda[CTA_MARK_MASK])
		mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));

	mark = ntohl(nla_get_be32(cda[CTA_MARK]));
	newmark = (ct->mark & mask) ^ mark;
	if (newmark != ct->mark)
		ct->mark = newmark;
}
#endif

static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
	[CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
	[CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
	[CTA_PROTOINFO_SCTP] = { .type = NLA_NESTED },
};

static int ctnetlink_change_protoinfo(struct nf_conn *ct,
				      const struct nlattr * const cda[])
{
	const struct nlattr *attr = cda[CTA_PROTOINFO];
	const struct nf_conntrack_l4proto *l4proto;
	struct nlattr *tb[CTA_PROTOINFO_MAX+1];
	int err = 0;

	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_MAX, attr,
					  protoinfo_policy, NULL);
	if (err < 0)
		return err;

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (l4proto->from_nlattr)
		err = l4proto->from_nlattr(tb, ct);

	return err;
}

static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
	[CTA_SEQADJ_CORRECTION_POS] = { .type = NLA_U32 },
	[CTA_SEQADJ_OFFSET_BEFORE] = { .type = NLA_U32 },
	[CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 },
};

static int change_seq_adj(struct nf_ct_seqadj *seq,
			  const struct nlattr * const attr)
{
	int err;
	struct nlattr *cda[CTA_SEQADJ_MAX+1];

	err = nla_parse_nested_deprecated(cda, CTA_SEQADJ_MAX, attr,
					  seqadj_policy, NULL);
	if (err < 0)
		return err;

	if (!cda[CTA_SEQADJ_CORRECTION_POS])
		return -EINVAL;

	seq->correction_pos =
		ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));

	if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
		return -EINVAL;

	seq->offset_before =
		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));

	if (!cda[CTA_SEQADJ_OFFSET_AFTER])
		return -EINVAL;

	seq->offset_after =
		ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));

	return 0;
}

static int
ctnetlink_change_seq_adj(struct nf_conn *ct,
			 const struct nlattr * const cda[])
{
	struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
	int ret = 0;

	if (!seqadj)
		return 0;

	spin_lock_bh(&ct->lock);
	if (cda[CTA_SEQ_ADJ_ORIG]) {
		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
				     cda[CTA_SEQ_ADJ_ORIG]);
		if (ret < 0)
			goto err;

		set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
	}

	if (cda[CTA_SEQ_ADJ_REPLY]) {
		ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
cda[CTA_SEQ_ADJ_REPLY]); 2141 if (ret < 0) 2142 goto err; 2143 2144 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status); 2145 } 2146 2147 spin_unlock_bh(&ct->lock); 2148 return 0; 2149err: 2150 spin_unlock_bh(&ct->lock); 2151 return ret; 2152} 2153 2154static const struct nla_policy synproxy_policy[CTA_SYNPROXY_MAX + 1] = { 2155 [CTA_SYNPROXY_ISN] = { .type = NLA_U32 }, 2156 [CTA_SYNPROXY_ITS] = { .type = NLA_U32 }, 2157 [CTA_SYNPROXY_TSOFF] = { .type = NLA_U32 }, 2158}; 2159 2160static int ctnetlink_change_synproxy(struct nf_conn *ct, 2161 const struct nlattr * const cda[]) 2162{ 2163 struct nf_conn_synproxy *synproxy = nfct_synproxy(ct); 2164 struct nlattr *tb[CTA_SYNPROXY_MAX + 1]; 2165 int err; 2166 2167 if (!synproxy) 2168 return 0; 2169 2170 err = nla_parse_nested_deprecated(tb, CTA_SYNPROXY_MAX, 2171 cda[CTA_SYNPROXY], synproxy_policy, 2172 NULL); 2173 if (err < 0) 2174 return err; 2175 2176 if (!tb[CTA_SYNPROXY_ISN] || 2177 !tb[CTA_SYNPROXY_ITS] || 2178 !tb[CTA_SYNPROXY_TSOFF]) 2179 return -EINVAL; 2180 2181 synproxy->isn = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ISN])); 2182 synproxy->its = ntohl(nla_get_be32(tb[CTA_SYNPROXY_ITS])); 2183 synproxy->tsoff = ntohl(nla_get_be32(tb[CTA_SYNPROXY_TSOFF])); 2184 2185 return 0; 2186} 2187 2188static int 2189ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[]) 2190{ 2191#ifdef CONFIG_NF_CONNTRACK_LABELS 2192 size_t len = nla_len(cda[CTA_LABELS]); 2193 const void *mask = cda[CTA_LABELS_MASK]; 2194 2195 if (len & (sizeof(u32)-1)) /* must be multiple of u32 */ 2196 return -EINVAL; 2197 2198 if (mask) { 2199 if (nla_len(cda[CTA_LABELS_MASK]) == 0 || 2200 nla_len(cda[CTA_LABELS_MASK]) != len) 2201 return -EINVAL; 2202 mask = nla_data(cda[CTA_LABELS_MASK]); 2203 } 2204 2205 len /= sizeof(u32); 2206 2207 return nf_connlabels_replace(ct, nla_data(cda[CTA_LABELS]), mask, len); 2208#else 2209 return -EOPNOTSUPP; 2210#endif 2211} 2212 2213static int 2214ctnetlink_change_conntrack(struct nf_conn *ct, 2215 const struct nlattr * const cda[]) 2216{ 2217 int err; 2218 2219 /* only allow NAT changes and master assignation for new conntracks */ 2220 if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER]) 2221 return -EOPNOTSUPP; 2222 2223 if (cda[CTA_HELP]) { 2224 err = ctnetlink_change_helper(ct, cda); 2225 if (err < 0) 2226 return err; 2227 } 2228 2229 if (cda[CTA_TIMEOUT]) { 2230 err = ctnetlink_change_timeout(ct, cda); 2231 if (err < 0) 2232 return err; 2233 } 2234 2235 if (cda[CTA_STATUS]) { 2236 err = ctnetlink_change_status(ct, cda); 2237 if (err < 0) 2238 return err; 2239 } 2240 2241 if (cda[CTA_PROTOINFO]) { 2242 err = ctnetlink_change_protoinfo(ct, cda); 2243 if (err < 0) 2244 return err; 2245 } 2246 2247#if defined(CONFIG_NF_CONNTRACK_MARK) 2248 if (cda[CTA_MARK]) 2249 ctnetlink_change_mark(ct, cda); 2250#endif 2251 2252 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) { 2253 err = ctnetlink_change_seq_adj(ct, cda); 2254 if (err < 0) 2255 return err; 2256 } 2257 2258 if (cda[CTA_SYNPROXY]) { 2259 err = ctnetlink_change_synproxy(ct, cda); 2260 if (err < 0) 2261 return err; 2262 } 2263 2264 if (cda[CTA_LABELS]) { 2265 err = ctnetlink_attach_labels(ct, cda); 2266 if (err < 0) 2267 return err; 2268 } 2269 2270 return 0; 2271} 2272 2273static struct nf_conn * 2274ctnetlink_create_conntrack(struct net *net, 2275 const struct nf_conntrack_zone *zone, 2276 const struct nlattr * const cda[], 2277 struct nf_conntrack_tuple *otuple, 2278 struct nf_conntrack_tuple *rtuple, 2279 u8 u3) 2280{ 2281 struct nf_conn *ct; 2282 int err = 
-EINVAL; 2283 struct nf_conntrack_helper *helper; 2284 struct nf_conn_tstamp *tstamp; 2285 u64 timeout; 2286 2287 ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); 2288 if (IS_ERR(ct)) 2289 return ERR_PTR(-ENOMEM); 2290 2291 if (!cda[CTA_TIMEOUT]) 2292 goto err1; 2293 2294 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; 2295 if (timeout > INT_MAX) 2296 timeout = INT_MAX; 2297 ct->timeout = (u32)timeout + nfct_time_stamp; 2298 2299 rcu_read_lock(); 2300 if (cda[CTA_HELP]) { 2301 char *helpname = NULL; 2302 struct nlattr *helpinfo = NULL; 2303 2304 err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo); 2305 if (err < 0) 2306 goto err2; 2307 2308 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), 2309 nf_ct_protonum(ct)); 2310 if (helper == NULL) { 2311 rcu_read_unlock(); 2312#ifdef CONFIG_MODULES 2313 if (request_module("nfct-helper-%s", helpname) < 0) { 2314 err = -EOPNOTSUPP; 2315 goto err1; 2316 } 2317 2318 rcu_read_lock(); 2319 helper = __nf_conntrack_helper_find(helpname, 2320 nf_ct_l3num(ct), 2321 nf_ct_protonum(ct)); 2322 if (helper) { 2323 err = -EAGAIN; 2324 goto err2; 2325 } 2326 rcu_read_unlock(); 2327#endif 2328 err = -EOPNOTSUPP; 2329 goto err1; 2330 } else { 2331 struct nf_conn_help *help; 2332 2333 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC); 2334 if (help == NULL) { 2335 err = -ENOMEM; 2336 goto err2; 2337 } 2338 /* set private helper data if allowed. */ 2339 if (helper->from_nlattr) 2340 helper->from_nlattr(helpinfo, ct); 2341 2342 /* disable helper auto-assignment for this entry */ 2343 ct->status |= IPS_HELPER; 2344 RCU_INIT_POINTER(help->helper, helper); 2345 } 2346 } else { 2347 /* try an implicit helper assignment */ 2348 err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC); 2349 if (err < 0) 2350 goto err2; 2351 } 2352 2353 err = ctnetlink_setup_nat(ct, cda); 2354 if (err < 0) 2355 goto err2; 2356 2357 nf_ct_acct_ext_add(ct, GFP_ATOMIC); 2358 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); 2359 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); 2360 nf_ct_labels_ext_add(ct); 2361 nfct_seqadj_ext_add(ct); 2362 nfct_synproxy_ext_add(ct); 2363 2364 /* we must add conntrack extensions before confirmation.
*/ 2365 ct->status |= IPS_CONFIRMED; 2366 2367 if (cda[CTA_STATUS]) { 2368 err = ctnetlink_change_status(ct, cda); 2369 if (err < 0) 2370 goto err2; 2371 } 2372 2373 if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) { 2374 err = ctnetlink_change_seq_adj(ct, cda); 2375 if (err < 0) 2376 goto err2; 2377 } 2378 2379 memset(&ct->proto, 0, sizeof(ct->proto)); 2380 if (cda[CTA_PROTOINFO]) { 2381 err = ctnetlink_change_protoinfo(ct, cda); 2382 if (err < 0) 2383 goto err2; 2384 } 2385 2386 if (cda[CTA_SYNPROXY]) { 2387 err = ctnetlink_change_synproxy(ct, cda); 2388 if (err < 0) 2389 goto err2; 2390 } 2391 2392#if defined(CONFIG_NF_CONNTRACK_MARK) 2393 if (cda[CTA_MARK]) 2394 ctnetlink_change_mark(ct, cda); 2395#endif 2396 2397 /* setup master conntrack: this is a confirmed expectation */ 2398 if (cda[CTA_TUPLE_MASTER]) { 2399 struct nf_conntrack_tuple master; 2400 struct nf_conntrack_tuple_hash *master_h; 2401 struct nf_conn *master_ct; 2402 2403 err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, 2404 u3, NULL); 2405 if (err < 0) 2406 goto err2; 2407 2408 master_h = nf_conntrack_find_get(net, zone, &master); 2409 if (master_h == NULL) { 2410 err = -ENOENT; 2411 goto err2; 2412 } 2413 master_ct = nf_ct_tuplehash_to_ctrack(master_h); 2414 __set_bit(IPS_EXPECTED_BIT, &ct->status); 2415 ct->master = master_ct; 2416 } 2417 tstamp = nf_conn_tstamp_find(ct); 2418 if (tstamp) 2419 tstamp->start = ktime_get_real_ns(); 2420 2421 err = nf_conntrack_hash_check_insert(ct); 2422 if (err < 0) 2423 goto err2; 2424 2425 rcu_read_unlock(); 2426 2427 return ct; 2428 2429err2: 2430 rcu_read_unlock(); 2431err1: 2432 nf_conntrack_free(ct); 2433 return ERR_PTR(err); 2434} 2435 2436static int ctnetlink_new_conntrack(struct sk_buff *skb, 2437 const struct nfnl_info *info, 2438 const struct nlattr * const cda[]) 2439{ 2440 struct nf_conntrack_tuple otuple, rtuple; 2441 struct nf_conntrack_tuple_hash *h = NULL; 2442 u_int8_t u3 = info->nfmsg->nfgen_family; 2443 struct nf_conntrack_zone zone; 2444 struct nf_conn *ct; 2445 int err; 2446 2447 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); 2448 if (err < 0) 2449 return err; 2450 2451 if (cda[CTA_TUPLE_ORIG]) { 2452 err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, 2453 u3, &zone); 2454 if (err < 0) 2455 return err; 2456 } 2457 2458 if (cda[CTA_TUPLE_REPLY]) { 2459 err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, 2460 u3, &zone); 2461 if (err < 0) 2462 return err; 2463 } 2464 2465 if (cda[CTA_TUPLE_ORIG]) 2466 h = nf_conntrack_find_get(info->net, &zone, &otuple); 2467 else if (cda[CTA_TUPLE_REPLY]) 2468 h = nf_conntrack_find_get(info->net, &zone, &rtuple); 2469 2470 if (h == NULL) { 2471 err = -ENOENT; 2472 if (info->nlh->nlmsg_flags & NLM_F_CREATE) { 2473 enum ip_conntrack_events events; 2474 2475 if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY]) 2476 return -EINVAL; 2477 if (otuple.dst.protonum != rtuple.dst.protonum) 2478 return -EINVAL; 2479 2480 ct = ctnetlink_create_conntrack(info->net, &zone, cda, 2481 &otuple, &rtuple, u3); 2482 if (IS_ERR(ct)) 2483 return PTR_ERR(ct); 2484 2485 err = 0; 2486 if (test_bit(IPS_EXPECTED_BIT, &ct->status)) 2487 events = 1 << IPCT_RELATED; 2488 else 2489 events = 1 << IPCT_NEW; 2490 2491 if (cda[CTA_LABELS] && 2492 ctnetlink_attach_labels(ct, cda) == 0) 2493 events |= (1 << IPCT_LABEL); 2494 2495 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 2496 (1 << IPCT_ASSURED) | 2497 (1 << IPCT_HELPER) | 2498 (1 << IPCT_PROTOINFO) | 2499 (1 << IPCT_SEQADJ) | 2500 (1 << IPCT_MARK) | 2501 (1 << IPCT_SYNPROXY) | 2502 
events, 2503 ct, NETLINK_CB(skb).portid, 2504 nlmsg_report(info->nlh)); 2505 nf_ct_put(ct); 2506 } 2507 2508 return err; 2509 } 2510 /* implicit 'else' */ 2511 2512 err = -EEXIST; 2513 ct = nf_ct_tuplehash_to_ctrack(h); 2514 if (!(info->nlh->nlmsg_flags & NLM_F_EXCL)) { 2515 err = ctnetlink_change_conntrack(ct, cda); 2516 if (err == 0) { 2517 nf_conntrack_eventmask_report((1 << IPCT_REPLY) | 2518 (1 << IPCT_ASSURED) | 2519 (1 << IPCT_HELPER) | 2520 (1 << IPCT_LABEL) | 2521 (1 << IPCT_PROTOINFO) | 2522 (1 << IPCT_SEQADJ) | 2523 (1 << IPCT_MARK) | 2524 (1 << IPCT_SYNPROXY), 2525 ct, NETLINK_CB(skb).portid, 2526 nlmsg_report(info->nlh)); 2527 } 2528 } 2529 2530 nf_ct_put(ct); 2531 return err; 2532} 2533 2534static int 2535ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq, 2536 __u16 cpu, const struct ip_conntrack_stat *st) 2537{ 2538 struct nlmsghdr *nlh; 2539 unsigned int flags = portid ? NLM_F_MULTI : 0, event; 2540 2541 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, 2542 IPCTNL_MSG_CT_GET_STATS_CPU); 2543 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, 2544 NFNETLINK_V0, htons(cpu)); 2545 if (!nlh) 2546 goto nlmsg_failure; 2547 2548 if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) || 2549 nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) || 2550 nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) || 2551 nla_put_be32(skb, CTA_STATS_INSERT_FAILED, 2552 htonl(st->insert_failed)) || 2553 nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) || 2554 nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) || 2555 nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) || 2556 nla_put_be32(skb, CTA_STATS_SEARCH_RESTART, 2557 htonl(st->search_restart)) || 2558 nla_put_be32(skb, CTA_STATS_CLASH_RESOLVE, 2559 htonl(st->clash_resolve)) || 2560 nla_put_be32(skb, CTA_STATS_CHAIN_TOOLONG, 2561 htonl(st->chaintoolong))) 2562 goto nla_put_failure; 2563 2564 nlmsg_end(skb, nlh); 2565 return skb->len; 2566 2567nla_put_failure: 2568nlmsg_failure: 2569 nlmsg_cancel(skb, nlh); 2570 return -1; 2571} 2572 2573static int 2574ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb) 2575{ 2576 int cpu; 2577 struct net *net = sock_net(skb->sk); 2578 2579 if (cb->args[0] == nr_cpu_ids) 2580 return 0; 2581 2582 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { 2583 const struct ip_conntrack_stat *st; 2584 2585 if (!cpu_possible(cpu)) 2586 continue; 2587 2588 st = per_cpu_ptr(net->ct.stat, cpu); 2589 if (ctnetlink_ct_stat_cpu_fill_info(skb, 2590 NETLINK_CB(cb->skb).portid, 2591 cb->nlh->nlmsg_seq, 2592 cpu, st) < 0) 2593 break; 2594 } 2595 cb->args[0] = cpu; 2596 2597 return skb->len; 2598} 2599 2600static int ctnetlink_stat_ct_cpu(struct sk_buff *skb, 2601 const struct nfnl_info *info, 2602 const struct nlattr * const cda[]) 2603{ 2604 if (info->nlh->nlmsg_flags & NLM_F_DUMP) { 2605 struct netlink_dump_control c = { 2606 .dump = ctnetlink_ct_stat_cpu_dump, 2607 }; 2608 return netlink_dump_start(info->sk, skb, info->nlh, &c); 2609 } 2610 2611 return 0; 2612} 2613 2614static int 2615ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, 2616 struct net *net) 2617{ 2618 unsigned int flags = portid ? 
NLM_F_MULTI : 0, event; 2619 unsigned int nr_conntracks; 2620 struct nlmsghdr *nlh; 2621 2622 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET_STATS); 2623 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, 2624 NFNETLINK_V0, 0); 2625 if (!nlh) 2626 goto nlmsg_failure; 2627 2628 nr_conntracks = nf_conntrack_count(net); 2629 if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks))) 2630 goto nla_put_failure; 2631 2632 if (nla_put_be32(skb, CTA_STATS_GLOBAL_MAX_ENTRIES, htonl(nf_conntrack_max))) 2633 goto nla_put_failure; 2634 2635 nlmsg_end(skb, nlh); 2636 return skb->len; 2637 2638nla_put_failure: 2639nlmsg_failure: 2640 nlmsg_cancel(skb, nlh); 2641 return -1; 2642} 2643 2644static int ctnetlink_stat_ct(struct sk_buff *skb, const struct nfnl_info *info, 2645 const struct nlattr * const cda[]) 2646{ 2647 struct sk_buff *skb2; 2648 int err; 2649 2650 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 2651 if (skb2 == NULL) 2652 return -ENOMEM; 2653 2654 err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).portid, 2655 info->nlh->nlmsg_seq, 2656 NFNL_MSG_TYPE(info->nlh->nlmsg_type), 2657 sock_net(skb->sk)); 2658 if (err <= 0) { 2659 kfree_skb(skb2); 2660 return -ENOMEM; 2661 } 2662 2663 return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); 2664} 2665 2666static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { 2667 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED }, 2668 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED }, 2669 [CTA_EXPECT_MASK] = { .type = NLA_NESTED }, 2670 [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 }, 2671 [CTA_EXPECT_ID] = { .type = NLA_U32 }, 2672 [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING, 2673 .len = NF_CT_HELPER_NAME_LEN - 1 }, 2674 [CTA_EXPECT_ZONE] = { .type = NLA_U16 }, 2675 [CTA_EXPECT_FLAGS] = { .type = NLA_U32 }, 2676 [CTA_EXPECT_CLASS] = { .type = NLA_U32 }, 2677 [CTA_EXPECT_NAT] = { .type = NLA_NESTED }, 2678 [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING }, 2679}; 2680 2681static struct nf_conntrack_expect * 2682ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct, 2683 struct nf_conntrack_helper *helper, 2684 struct nf_conntrack_tuple *tuple, 2685 struct nf_conntrack_tuple *mask); 2686 2687#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 2688static size_t 2689ctnetlink_glue_build_size(const struct nf_conn *ct) 2690{ 2691 return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */ 2692 + 3 * nla_total_size(0) /* CTA_TUPLE_IP */ 2693 + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */ 2694 + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */ 2695 + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */ 2696 + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */ 2697 + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */ 2698 + nla_total_size(0) /* CTA_PROTOINFO */ 2699 + nla_total_size(0) /* CTA_HELP */ 2700 + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */ 2701 + ctnetlink_secctx_size(ct) 2702 + ctnetlink_acct_size(ct) 2703 + ctnetlink_timestamp_size(ct) 2704#if IS_ENABLED(CONFIG_NF_NAT) 2705 + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */ 2706 + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */ 2707#endif 2708#ifdef CONFIG_NF_CONNTRACK_MARK 2709 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ 2710#endif 2711#ifdef CONFIG_NF_CONNTRACK_ZONES 2712 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */ 2713#endif 2714 + ctnetlink_proto_size(ct) 2715 ; 2716} 2717 2718static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) 2719{ 2720 const struct 
nf_conntrack_zone *zone; 2721 struct nlattr *nest_parms; 2722 2723 zone = nf_ct_zone(ct); 2724 2725 nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG); 2726 if (!nest_parms) 2727 goto nla_put_failure; 2728 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0) 2729 goto nla_put_failure; 2730 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, 2731 NF_CT_ZONE_DIR_ORIG) < 0) 2732 goto nla_put_failure; 2733 nla_nest_end(skb, nest_parms); 2734 2735 nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY); 2736 if (!nest_parms) 2737 goto nla_put_failure; 2738 if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0) 2739 goto nla_put_failure; 2740 if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone, 2741 NF_CT_ZONE_DIR_REPL) < 0) 2742 goto nla_put_failure; 2743 nla_nest_end(skb, nest_parms); 2744 2745 if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone, 2746 NF_CT_DEFAULT_ZONE_DIR) < 0) 2747 goto nla_put_failure; 2748 2749 if (ctnetlink_dump_id(skb, ct) < 0) 2750 goto nla_put_failure; 2751 2752 if (ctnetlink_dump_status(skb, ct) < 0) 2753 goto nla_put_failure; 2754 2755 if (ctnetlink_dump_timeout(skb, ct, false) < 0) 2756 goto nla_put_failure; 2757 2758 if (ctnetlink_dump_protoinfo(skb, ct, false) < 0) 2759 goto nla_put_failure; 2760 2761 if (ctnetlink_dump_acct(skb, ct, IPCTNL_MSG_CT_GET) < 0 || 2762 ctnetlink_dump_timestamp(skb, ct) < 0) 2763 goto nla_put_failure; 2764 2765 if (ctnetlink_dump_helpinfo(skb, ct) < 0) 2766 goto nla_put_failure; 2767 2768#ifdef CONFIG_NF_CONNTRACK_SECMARK 2769 if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0) 2770 goto nla_put_failure; 2771#endif 2772 if (ct->master && ctnetlink_dump_master(skb, ct) < 0) 2773 goto nla_put_failure; 2774 2775 if ((ct->status & IPS_SEQ_ADJUST) && 2776 ctnetlink_dump_ct_seq_adj(skb, ct) < 0) 2777 goto nla_put_failure; 2778 2779 if (ctnetlink_dump_ct_synproxy(skb, ct) < 0) 2780 goto nla_put_failure; 2781 2782#ifdef CONFIG_NF_CONNTRACK_MARK 2783 if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0) 2784 goto nla_put_failure; 2785#endif 2786 if (ctnetlink_dump_labels(skb, ct) < 0) 2787 goto nla_put_failure; 2788 return 0; 2789 2790nla_put_failure: 2791 return -ENOSPC; 2792} 2793 2794static int 2795ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct, 2796 enum ip_conntrack_info ctinfo, 2797 u_int16_t ct_attr, u_int16_t ct_info_attr) 2798{ 2799 struct nlattr *nest_parms; 2800 2801 nest_parms = nla_nest_start(skb, ct_attr); 2802 if (!nest_parms) 2803 goto nla_put_failure; 2804 2805 if (__ctnetlink_glue_build(skb, ct) < 0) 2806 goto nla_put_failure; 2807 2808 nla_nest_end(skb, nest_parms); 2809 2810 if (nla_put_be32(skb, ct_info_attr, htonl(ctinfo))) 2811 goto nla_put_failure; 2812 2813 return 0; 2814 2815nla_put_failure: 2816 return -ENOSPC; 2817} 2818 2819static int 2820ctnetlink_update_status(struct nf_conn *ct, const struct nlattr * const cda[]) 2821{ 2822 unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS])); 2823 unsigned long d = ct->status ^ status; 2824 2825 if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY)) 2826 /* SEEN_REPLY bit can only be set */ 2827 return -EBUSY; 2828 2829 if (d & IPS_ASSURED && !(status & IPS_ASSURED)) 2830 /* ASSURED bit can only be set */ 2831 return -EBUSY; 2832 2833 /* This check is less strict than ctnetlink_change_status() 2834 * because callers often flip IPS_EXPECTED bits when sending 2835 * an NFQA_CT attribute to the kernel. So ignore the 2836 * unchangeable bits but do not error out. 
Also user programs 2837 * are allowed to clear the bits that they are allowed to change. 2838 */ 2839 __ctnetlink_change_status(ct, status, ~status); 2840 return 0; 2841} 2842 2843static int 2844ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct) 2845{ 2846 int err; 2847 2848 if (cda[CTA_TIMEOUT]) { 2849 err = ctnetlink_change_timeout(ct, cda); 2850 if (err < 0) 2851 return err; 2852 } 2853 if (cda[CTA_STATUS]) { 2854 err = ctnetlink_update_status(ct, cda); 2855 if (err < 0) 2856 return err; 2857 } 2858 if (cda[CTA_HELP]) { 2859 err = ctnetlink_change_helper(ct, cda); 2860 if (err < 0) 2861 return err; 2862 } 2863 if (cda[CTA_LABELS]) { 2864 err = ctnetlink_attach_labels(ct, cda); 2865 if (err < 0) 2866 return err; 2867 } 2868#if defined(CONFIG_NF_CONNTRACK_MARK) 2869 if (cda[CTA_MARK]) { 2870 ctnetlink_change_mark(ct, cda); 2871 } 2872#endif 2873 return 0; 2874} 2875 2876static int 2877ctnetlink_glue_parse(const struct nlattr *attr, struct nf_conn *ct) 2878{ 2879 struct nlattr *cda[CTA_MAX+1]; 2880 int ret; 2881 2882 ret = nla_parse_nested_deprecated(cda, CTA_MAX, attr, ct_nla_policy, 2883 NULL); 2884 if (ret < 0) 2885 return ret; 2886 2887 return ctnetlink_glue_parse_ct((const struct nlattr **)cda, ct); 2888} 2889 2890static int ctnetlink_glue_exp_parse(const struct nlattr * const *cda, 2891 const struct nf_conn *ct, 2892 struct nf_conntrack_tuple *tuple, 2893 struct nf_conntrack_tuple *mask) 2894{ 2895 int err; 2896 2897 err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE, 2898 nf_ct_l3num(ct), NULL); 2899 if (err < 0) 2900 return err; 2901 2902 return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK, 2903 nf_ct_l3num(ct), NULL); 2904} 2905 2906static int 2907ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct, 2908 u32 portid, u32 report) 2909{ 2910 struct nlattr *cda[CTA_EXPECT_MAX+1]; 2911 struct nf_conntrack_tuple tuple, mask; 2912 struct nf_conntrack_helper *helper = NULL; 2913 struct nf_conntrack_expect *exp; 2914 int err; 2915 2916 err = nla_parse_nested_deprecated(cda, CTA_EXPECT_MAX, attr, 2917 exp_nla_policy, NULL); 2918 if (err < 0) 2919 return err; 2920 2921 err = ctnetlink_glue_exp_parse((const struct nlattr * const *)cda, 2922 ct, &tuple, &mask); 2923 if (err < 0) 2924 return err; 2925 2926 if (cda[CTA_EXPECT_HELP_NAME]) { 2927 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); 2928 2929 helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct), 2930 nf_ct_protonum(ct)); 2931 if (helper == NULL) 2932 return -EOPNOTSUPP; 2933 } 2934 2935 exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct, 2936 helper, &tuple, &mask); 2937 if (IS_ERR(exp)) 2938 return PTR_ERR(exp); 2939 2940 err = nf_ct_expect_related_report(exp, portid, report, 0); 2941 nf_ct_expect_put(exp); 2942 return err; 2943} 2944 2945static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct, 2946 enum ip_conntrack_info ctinfo, int diff) 2947{ 2948 if (!(ct->status & IPS_NAT_MASK)) 2949 return; 2950 2951 nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff); 2952} 2953 2954static const struct nfnl_ct_hook ctnetlink_glue_hook = { 2955 .build_size = ctnetlink_glue_build_size, 2956 .build = ctnetlink_glue_build, 2957 .parse = ctnetlink_glue_parse, 2958 .attach_expect = ctnetlink_glue_attach_expect, 2959 .seq_adjust = ctnetlink_glue_seqadj, 2960}; 2961#endif /* CONFIG_NETFILTER_NETLINK_GLUE_CT */ 2962 2963/*********************************************************************** 2964 * EXPECT 2965 
***********************************************************************/ 2966 2967static int ctnetlink_exp_dump_tuple(struct sk_buff *skb, 2968 const struct nf_conntrack_tuple *tuple, 2969 u32 type) 2970{ 2971 struct nlattr *nest_parms; 2972 2973 nest_parms = nla_nest_start(skb, type); 2974 if (!nest_parms) 2975 goto nla_put_failure; 2976 if (ctnetlink_dump_tuples(skb, tuple) < 0) 2977 goto nla_put_failure; 2978 nla_nest_end(skb, nest_parms); 2979 2980 return 0; 2981 2982nla_put_failure: 2983 return -1; 2984} 2985 2986static int ctnetlink_exp_dump_mask(struct sk_buff *skb, 2987 const struct nf_conntrack_tuple *tuple, 2988 const struct nf_conntrack_tuple_mask *mask) 2989{ 2990 const struct nf_conntrack_l4proto *l4proto; 2991 struct nf_conntrack_tuple m; 2992 struct nlattr *nest_parms; 2993 int ret; 2994 2995 memset(&m, 0xFF, sizeof(m)); 2996 memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3)); 2997 m.src.u.all = mask->src.u.all; 2998 m.src.l3num = tuple->src.l3num; 2999 m.dst.protonum = tuple->dst.protonum; 3000 3001 nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK); 3002 if (!nest_parms) 3003 goto nla_put_failure; 3004 3005 rcu_read_lock(); 3006 ret = ctnetlink_dump_tuples_ip(skb, &m); 3007 if (ret >= 0) { 3008 l4proto = nf_ct_l4proto_find(tuple->dst.protonum); 3009 ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto); 3010 } 3011 rcu_read_unlock(); 3012 3013 if (unlikely(ret < 0)) 3014 goto nla_put_failure; 3015 3016 nla_nest_end(skb, nest_parms); 3017 3018 return 0; 3019 3020nla_put_failure: 3021 return -1; 3022} 3023 3024static const union nf_inet_addr any_addr; 3025 3026static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp) 3027{ 3028 static siphash_aligned_key_t exp_id_seed; 3029 unsigned long a, b, c, d; 3030 3031 net_get_random_once(&exp_id_seed, sizeof(exp_id_seed)); 3032 3033 a = (unsigned long)exp; 3034 b = (unsigned long)exp->helper; 3035 c = (unsigned long)exp->master; 3036 d = (unsigned long)siphash(&exp->tuple, sizeof(exp->tuple), &exp_id_seed); 3037 3038#ifdef CONFIG_64BIT 3039 return (__force __be32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &exp_id_seed); 3040#else 3041 return (__force __be32)siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &exp_id_seed); 3042#endif 3043} 3044 3045static int 3046ctnetlink_exp_dump_expect(struct sk_buff *skb, 3047 const struct nf_conntrack_expect *exp) 3048{ 3049 struct nf_conn *master = exp->master; 3050 long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ; 3051 struct nf_conn_help *help; 3052#if IS_ENABLED(CONFIG_NF_NAT) 3053 struct nlattr *nest_parms; 3054 struct nf_conntrack_tuple nat_tuple = {}; 3055#endif 3056 struct nf_ct_helper_expectfn *expfn; 3057 3058 if (timeout < 0) 3059 timeout = 0; 3060 3061 if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0) 3062 goto nla_put_failure; 3063 if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0) 3064 goto nla_put_failure; 3065 if (ctnetlink_exp_dump_tuple(skb, 3066 &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple, 3067 CTA_EXPECT_MASTER) < 0) 3068 goto nla_put_failure; 3069 3070#if IS_ENABLED(CONFIG_NF_NAT) 3071 if (!nf_inet_addr_cmp(&exp->saved_addr, &any_addr) || 3072 exp->saved_proto.all) { 3073 nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT); 3074 if (!nest_parms) 3075 goto nla_put_failure; 3076 3077 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir))) 3078 goto nla_put_failure; 3079 3080 nat_tuple.src.l3num = nf_ct_l3num(master); 3081 nat_tuple.src.u3 = exp->saved_addr; 3082 nat_tuple.dst.protonum = nf_ct_protonum(master); 3083 
nat_tuple.src.u = exp->saved_proto; 3084 3085 if (ctnetlink_exp_dump_tuple(skb, &nat_tuple, 3086 CTA_EXPECT_NAT_TUPLE) < 0) 3087 goto nla_put_failure; 3088 nla_nest_end(skb, nest_parms); 3089 } 3090#endif 3091 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) || 3092 nla_put_be32(skb, CTA_EXPECT_ID, nf_expect_get_id(exp)) || 3093 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) || 3094 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class))) 3095 goto nla_put_failure; 3096 help = nfct_help(master); 3097 if (help) { 3098 struct nf_conntrack_helper *helper; 3099 3100 helper = rcu_dereference(help->helper); 3101 if (helper && 3102 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name)) 3103 goto nla_put_failure; 3104 } 3105 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn); 3106 if (expfn != NULL && 3107 nla_put_string(skb, CTA_EXPECT_FN, expfn->name)) 3108 goto nla_put_failure; 3109 3110 return 0; 3111 3112nla_put_failure: 3113 return -1; 3114} 3115 3116static int 3117ctnetlink_exp_fill_info(struct sk_buff *skb, u32 portid, u32 seq, 3118 int event, const struct nf_conntrack_expect *exp) 3119{ 3120 struct nlmsghdr *nlh; 3121 unsigned int flags = portid ? NLM_F_MULTI : 0; 3122 3123 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, event); 3124 nlh = nfnl_msg_put(skb, portid, seq, event, flags, 3125 exp->tuple.src.l3num, NFNETLINK_V0, 0); 3126 if (!nlh) 3127 goto nlmsg_failure; 3128 3129 if (ctnetlink_exp_dump_expect(skb, exp) < 0) 3130 goto nla_put_failure; 3131 3132 nlmsg_end(skb, nlh); 3133 return skb->len; 3134 3135nlmsg_failure: 3136nla_put_failure: 3137 nlmsg_cancel(skb, nlh); 3138 return -1; 3139} 3140 3141#ifdef CONFIG_NF_CONNTRACK_EVENTS 3142static int 3143ctnetlink_expect_event(unsigned int events, const struct nf_exp_event *item) 3144{ 3145 struct nf_conntrack_expect *exp = item->exp; 3146 struct net *net = nf_ct_exp_net(exp); 3147 struct nlmsghdr *nlh; 3148 struct sk_buff *skb; 3149 unsigned int type, group; 3150 int flags = 0; 3151 3152 if (events & (1 << IPEXP_DESTROY)) { 3153 type = IPCTNL_MSG_EXP_DELETE; 3154 group = NFNLGRP_CONNTRACK_EXP_DESTROY; 3155 } else if (events & (1 << IPEXP_NEW)) { 3156 type = IPCTNL_MSG_EXP_NEW; 3157 flags = NLM_F_CREATE|NLM_F_EXCL; 3158 group = NFNLGRP_CONNTRACK_EXP_NEW; 3159 } else 3160 return 0; 3161 3162 if (!item->report && !nfnetlink_has_listeners(net, group)) 3163 return 0; 3164 3165 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); 3166 if (skb == NULL) 3167 goto errout; 3168 3169 type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK_EXP, type); 3170 nlh = nfnl_msg_put(skb, item->portid, 0, type, flags, 3171 exp->tuple.src.l3num, NFNETLINK_V0, 0); 3172 if (!nlh) 3173 goto nlmsg_failure; 3174 3175 if (ctnetlink_exp_dump_expect(skb, exp) < 0) 3176 goto nla_put_failure; 3177 3178 nlmsg_end(skb, nlh); 3179 nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC); 3180 return 0; 3181 3182nla_put_failure: 3183 nlmsg_cancel(skb, nlh); 3184nlmsg_failure: 3185 kfree_skb(skb); 3186errout: 3187 nfnetlink_set_err(net, 0, 0, -ENOBUFS); 3188 return 0; 3189} 3190#endif 3191static int ctnetlink_exp_done(struct netlink_callback *cb) 3192{ 3193 if (cb->args[1]) 3194 nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]); 3195 return 0; 3196} 3197 3198static int 3199ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 3200{ 3201 struct net *net = sock_net(skb->sk); 3202 struct nf_conntrack_expect *exp, *last; 3203 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 3204 u_int8_t l3proto = nfmsg->nfgen_family; 3205 
3206 rcu_read_lock(); 3207 last = (struct nf_conntrack_expect *)cb->args[1]; 3208 for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) { 3209restart: 3210 hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]], 3211 hnode) { 3212 if (l3proto && exp->tuple.src.l3num != l3proto) 3213 continue; 3214 3215 if (!net_eq(nf_ct_net(exp->master), net)) 3216 continue; 3217 3218 if (cb->args[1]) { 3219 if (exp != last) 3220 continue; 3221 cb->args[1] = 0; 3222 } 3223 if (ctnetlink_exp_fill_info(skb, 3224 NETLINK_CB(cb->skb).portid, 3225 cb->nlh->nlmsg_seq, 3226 IPCTNL_MSG_EXP_NEW, 3227 exp) < 0) { 3228 if (!refcount_inc_not_zero(&exp->use)) 3229 continue; 3230 cb->args[1] = (unsigned long)exp; 3231 goto out; 3232 } 3233 } 3234 if (cb->args[1]) { 3235 cb->args[1] = 0; 3236 goto restart; 3237 } 3238 } 3239out: 3240 rcu_read_unlock(); 3241 if (last) 3242 nf_ct_expect_put(last); 3243 3244 return skb->len; 3245} 3246 3247static int 3248ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb) 3249{ 3250 struct nf_conntrack_expect *exp, *last; 3251 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 3252 struct nf_conn *ct = cb->data; 3253 struct nf_conn_help *help = nfct_help(ct); 3254 u_int8_t l3proto = nfmsg->nfgen_family; 3255 3256 if (cb->args[0]) 3257 return 0; 3258 3259 rcu_read_lock(); 3260 last = (struct nf_conntrack_expect *)cb->args[1]; 3261restart: 3262 hlist_for_each_entry_rcu(exp, &help->expectations, lnode) { 3263 if (l3proto && exp->tuple.src.l3num != l3proto) 3264 continue; 3265 if (cb->args[1]) { 3266 if (exp != last) 3267 continue; 3268 cb->args[1] = 0; 3269 } 3270 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid, 3271 cb->nlh->nlmsg_seq, 3272 IPCTNL_MSG_EXP_NEW, 3273 exp) < 0) { 3274 if (!refcount_inc_not_zero(&exp->use)) 3275 continue; 3276 cb->args[1] = (unsigned long)exp; 3277 goto out; 3278 } 3279 } 3280 if (cb->args[1]) { 3281 cb->args[1] = 0; 3282 goto restart; 3283 } 3284 cb->args[0] = 1; 3285out: 3286 rcu_read_unlock(); 3287 if (last) 3288 nf_ct_expect_put(last); 3289 3290 return skb->len; 3291} 3292 3293static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl, 3294 struct sk_buff *skb, 3295 const struct nlmsghdr *nlh, 3296 const struct nlattr * const cda[], 3297 struct netlink_ext_ack *extack) 3298{ 3299 int err; 3300 struct nfgenmsg *nfmsg = nlmsg_data(nlh); 3301 u_int8_t u3 = nfmsg->nfgen_family; 3302 struct nf_conntrack_tuple tuple; 3303 struct nf_conntrack_tuple_hash *h; 3304 struct nf_conn *ct; 3305 struct nf_conntrack_zone zone; 3306 struct netlink_dump_control c = { 3307 .dump = ctnetlink_exp_ct_dump_table, 3308 .done = ctnetlink_exp_done, 3309 }; 3310 3311 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, 3312 u3, NULL); 3313 if (err < 0) 3314 return err; 3315 3316 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3317 if (err < 0) 3318 return err; 3319 3320 h = nf_conntrack_find_get(net, &zone, &tuple); 3321 if (!h) 3322 return -ENOENT; 3323 3324 ct = nf_ct_tuplehash_to_ctrack(h); 3325 /* No expectation linked to this connection tracking. 
*/ 3326 if (!nfct_help(ct)) { 3327 nf_ct_put(ct); 3328 return 0; 3329 } 3330 3331 c.data = ct; 3332 3333 err = netlink_dump_start(ctnl, skb, nlh, &c); 3334 nf_ct_put(ct); 3335 3336 return err; 3337} 3338 3339static int ctnetlink_get_expect(struct sk_buff *skb, 3340 const struct nfnl_info *info, 3341 const struct nlattr * const cda[]) 3342{ 3343 u_int8_t u3 = info->nfmsg->nfgen_family; 3344 struct nf_conntrack_tuple tuple; 3345 struct nf_conntrack_expect *exp; 3346 struct nf_conntrack_zone zone; 3347 struct sk_buff *skb2; 3348 int err; 3349 3350 if (info->nlh->nlmsg_flags & NLM_F_DUMP) { 3351 if (cda[CTA_EXPECT_MASTER]) 3352 return ctnetlink_dump_exp_ct(info->net, info->sk, skb, 3353 info->nlh, cda, 3354 info->extack); 3355 else { 3356 struct netlink_dump_control c = { 3357 .dump = ctnetlink_exp_dump_table, 3358 .done = ctnetlink_exp_done, 3359 }; 3360 return netlink_dump_start(info->sk, skb, info->nlh, &c); 3361 } 3362 } 3363 3364 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3365 if (err < 0) 3366 return err; 3367 3368 if (cda[CTA_EXPECT_TUPLE]) 3369 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3370 u3, NULL); 3371 else if (cda[CTA_EXPECT_MASTER]) 3372 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, 3373 u3, NULL); 3374 else 3375 return -EINVAL; 3376 3377 if (err < 0) 3378 return err; 3379 3380 exp = nf_ct_expect_find_get(info->net, &zone, &tuple); 3381 if (!exp) 3382 return -ENOENT; 3383 3384 if (cda[CTA_EXPECT_ID]) { 3385 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); 3386 3387 if (id != nf_expect_get_id(exp)) { 3388 nf_ct_expect_put(exp); 3389 return -ENOENT; 3390 } 3391 } 3392 3393 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 3394 if (!skb2) { 3395 nf_ct_expect_put(exp); 3396 return -ENOMEM; 3397 } 3398 3399 rcu_read_lock(); 3400 err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).portid, 3401 info->nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, 3402 exp); 3403 rcu_read_unlock(); 3404 nf_ct_expect_put(exp); 3405 if (err <= 0) { 3406 kfree_skb(skb2); 3407 return -ENOMEM; 3408 } 3409 3410 return nfnetlink_unicast(skb2, info->net, NETLINK_CB(skb).portid); 3411} 3412 3413static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data) 3414{ 3415 const struct nf_conn_help *m_help; 3416 const char *name = data; 3417 3418 m_help = nfct_help(exp->master); 3419 3420 return strcmp(m_help->helper->name, name) == 0; 3421} 3422 3423static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data) 3424{ 3425 return true; 3426} 3427 3428static int ctnetlink_del_expect(struct sk_buff *skb, 3429 const struct nfnl_info *info, 3430 const struct nlattr * const cda[]) 3431{ 3432 u_int8_t u3 = info->nfmsg->nfgen_family; 3433 struct nf_conntrack_expect *exp; 3434 struct nf_conntrack_tuple tuple; 3435 struct nf_conntrack_zone zone; 3436 int err; 3437 3438 if (cda[CTA_EXPECT_TUPLE]) { 3439 /* delete a single expect by tuple */ 3440 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3441 if (err < 0) 3442 return err; 3443 3444 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3445 u3, NULL); 3446 if (err < 0) 3447 return err; 3448 3449 /* bump usage count to 2 */ 3450 exp = nf_ct_expect_find_get(info->net, &zone, &tuple); 3451 if (!exp) 3452 return -ENOENT; 3453 3454 if (cda[CTA_EXPECT_ID]) { 3455 __be32 id = nla_get_be32(cda[CTA_EXPECT_ID]); 3456 if (ntohl(id) != (u32)(unsigned long)exp) { 3457 nf_ct_expect_put(exp); 3458 return -ENOENT; 3459 } 3460 } 3461 3462 /* after list removal, usage count == 1 */ 3463 spin_lock_bh(&nf_conntrack_expect_lock); 
3464 if (del_timer(&exp->timeout)) { 3465 nf_ct_unlink_expect_report(exp, NETLINK_CB(skb).portid, 3466 nlmsg_report(info->nlh)); 3467 nf_ct_expect_put(exp); 3468 } 3469 spin_unlock_bh(&nf_conntrack_expect_lock); 3470 /* have to put what we 'get' above. 3471 * after this line usage count == 0 */ 3472 nf_ct_expect_put(exp); 3473 } else if (cda[CTA_EXPECT_HELP_NAME]) { 3474 char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]); 3475 3476 nf_ct_expect_iterate_net(info->net, expect_iter_name, name, 3477 NETLINK_CB(skb).portid, 3478 nlmsg_report(info->nlh)); 3479 } else { 3480 /* This basically means we have to flush everything*/ 3481 nf_ct_expect_iterate_net(info->net, expect_iter_all, NULL, 3482 NETLINK_CB(skb).portid, 3483 nlmsg_report(info->nlh)); 3484 } 3485 3486 return 0; 3487} 3488static int 3489ctnetlink_change_expect(struct nf_conntrack_expect *x, 3490 const struct nlattr * const cda[]) 3491{ 3492 if (cda[CTA_EXPECT_TIMEOUT]) { 3493 if (!del_timer(&x->timeout)) 3494 return -ETIME; 3495 3496 x->timeout.expires = jiffies + 3497 ntohl(nla_get_be32(cda[CTA_EXPECT_TIMEOUT])) * HZ; 3498 add_timer(&x->timeout); 3499 } 3500 return 0; 3501} 3502 3503static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = { 3504 [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 }, 3505 [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED }, 3506}; 3507 3508static int 3509ctnetlink_parse_expect_nat(const struct nlattr *attr, 3510 struct nf_conntrack_expect *exp, 3511 u_int8_t u3) 3512{ 3513#if IS_ENABLED(CONFIG_NF_NAT) 3514 struct nlattr *tb[CTA_EXPECT_NAT_MAX+1]; 3515 struct nf_conntrack_tuple nat_tuple = {}; 3516 int err; 3517 3518 err = nla_parse_nested_deprecated(tb, CTA_EXPECT_NAT_MAX, attr, 3519 exp_nat_nla_policy, NULL); 3520 if (err < 0) 3521 return err; 3522 3523 if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE]) 3524 return -EINVAL; 3525 3526 err = ctnetlink_parse_tuple((const struct nlattr * const *)tb, 3527 &nat_tuple, CTA_EXPECT_NAT_TUPLE, 3528 u3, NULL); 3529 if (err < 0) 3530 return err; 3531 3532 exp->saved_addr = nat_tuple.src.u3; 3533 exp->saved_proto = nat_tuple.src.u; 3534 exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR])); 3535 3536 return 0; 3537#else 3538 return -EOPNOTSUPP; 3539#endif 3540} 3541 3542static struct nf_conntrack_expect * 3543ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct, 3544 struct nf_conntrack_helper *helper, 3545 struct nf_conntrack_tuple *tuple, 3546 struct nf_conntrack_tuple *mask) 3547{ 3548 u_int32_t class = 0; 3549 struct nf_conntrack_expect *exp; 3550 struct nf_conn_help *help; 3551 int err; 3552 3553 help = nfct_help(ct); 3554 if (!help) 3555 return ERR_PTR(-EOPNOTSUPP); 3556 3557 if (cda[CTA_EXPECT_CLASS] && helper) { 3558 class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS])); 3559 if (class > helper->expect_class_max) 3560 return ERR_PTR(-EINVAL); 3561 } 3562 exp = nf_ct_expect_alloc(ct); 3563 if (!exp) 3564 return ERR_PTR(-ENOMEM); 3565 3566 if (cda[CTA_EXPECT_FLAGS]) { 3567 exp->flags = ntohl(nla_get_be32(cda[CTA_EXPECT_FLAGS])); 3568 exp->flags &= ~NF_CT_EXPECT_USERSPACE; 3569 } else { 3570 exp->flags = 0; 3571 } 3572 if (cda[CTA_EXPECT_FN]) { 3573 const char *name = nla_data(cda[CTA_EXPECT_FN]); 3574 struct nf_ct_helper_expectfn *expfn; 3575 3576 expfn = nf_ct_helper_expectfn_find_by_name(name); 3577 if (expfn == NULL) { 3578 err = -EINVAL; 3579 goto err_out; 3580 } 3581 exp->expectfn = expfn->expectfn; 3582 } else 3583 exp->expectfn = NULL; 3584 3585 exp->class = class; 3586 exp->master = ct; 3587 exp->helper = helper; 3588 
exp->tuple = *tuple; 3589 exp->mask.src.u3 = mask->src.u3; 3590 exp->mask.src.u.all = mask->src.u.all; 3591 3592 if (cda[CTA_EXPECT_NAT]) { 3593 err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT], 3594 exp, nf_ct_l3num(ct)); 3595 if (err < 0) 3596 goto err_out; 3597 } 3598 return exp; 3599err_out: 3600 nf_ct_expect_put(exp); 3601 return ERR_PTR(err); 3602} 3603 3604static int 3605ctnetlink_create_expect(struct net *net, 3606 const struct nf_conntrack_zone *zone, 3607 const struct nlattr * const cda[], 3608 u_int8_t u3, u32 portid, int report) 3609{ 3610 struct nf_conntrack_tuple tuple, mask, master_tuple; 3611 struct nf_conntrack_tuple_hash *h = NULL; 3612 struct nf_conntrack_helper *helper = NULL; 3613 struct nf_conntrack_expect *exp; 3614 struct nf_conn *ct; 3615 int err; 3616 3617 /* caller guarantees that those three CTA_EXPECT_* exist */ 3618 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3619 u3, NULL); 3620 if (err < 0) 3621 return err; 3622 err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, 3623 u3, NULL); 3624 if (err < 0) 3625 return err; 3626 err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, 3627 u3, NULL); 3628 if (err < 0) 3629 return err; 3630 3631 /* Look for master conntrack of this expectation */ 3632 h = nf_conntrack_find_get(net, zone, &master_tuple); 3633 if (!h) 3634 return -ENOENT; 3635 ct = nf_ct_tuplehash_to_ctrack(h); 3636 3637 rcu_read_lock(); 3638 if (cda[CTA_EXPECT_HELP_NAME]) { 3639 const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]); 3640 3641 helper = __nf_conntrack_helper_find(helpname, u3, 3642 nf_ct_protonum(ct)); 3643 if (helper == NULL) { 3644 rcu_read_unlock(); 3645#ifdef CONFIG_MODULES 3646 if (request_module("nfct-helper-%s", helpname) < 0) { 3647 err = -EOPNOTSUPP; 3648 goto err_ct; 3649 } 3650 rcu_read_lock(); 3651 helper = __nf_conntrack_helper_find(helpname, u3, 3652 nf_ct_protonum(ct)); 3653 if (helper) { 3654 err = -EAGAIN; 3655 goto err_rcu; 3656 } 3657 rcu_read_unlock(); 3658#endif 3659 err = -EOPNOTSUPP; 3660 goto err_ct; 3661 } 3662 } 3663 3664 exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask); 3665 if (IS_ERR(exp)) { 3666 err = PTR_ERR(exp); 3667 goto err_rcu; 3668 } 3669 3670 err = nf_ct_expect_related_report(exp, portid, report, 0); 3671 nf_ct_expect_put(exp); 3672err_rcu: 3673 rcu_read_unlock(); 3674err_ct: 3675 nf_ct_put(ct); 3676 return err; 3677} 3678 3679static int ctnetlink_new_expect(struct sk_buff *skb, 3680 const struct nfnl_info *info, 3681 const struct nlattr * const cda[]) 3682{ 3683 u_int8_t u3 = info->nfmsg->nfgen_family; 3684 struct nf_conntrack_tuple tuple; 3685 struct nf_conntrack_expect *exp; 3686 struct nf_conntrack_zone zone; 3687 int err; 3688 3689 if (!cda[CTA_EXPECT_TUPLE] 3690 || !cda[CTA_EXPECT_MASK] 3691 || !cda[CTA_EXPECT_MASTER]) 3692 return -EINVAL; 3693 3694 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 3695 if (err < 0) 3696 return err; 3697 3698 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, 3699 u3, NULL); 3700 if (err < 0) 3701 return err; 3702 3703 spin_lock_bh(&nf_conntrack_expect_lock); 3704 exp = __nf_ct_expect_find(info->net, &zone, &tuple); 3705 if (!exp) { 3706 spin_unlock_bh(&nf_conntrack_expect_lock); 3707 err = -ENOENT; 3708 if (info->nlh->nlmsg_flags & NLM_F_CREATE) { 3709 err = ctnetlink_create_expect(info->net, &zone, cda, u3, 3710 NETLINK_CB(skb).portid, 3711 nlmsg_report(info->nlh)); 3712 } 3713 return err; 3714 } 3715 3716 err = -EEXIST; 3717 if (!(info->nlh->nlmsg_flags & NLM_F_EXCL)) 3718 err = 
ctnetlink_change_expect(exp, cda); 3719 spin_unlock_bh(&nf_conntrack_expect_lock); 3720 3721 return err; 3722} 3723 3724static int 3725ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu, 3726 const struct ip_conntrack_stat *st) 3727{ 3728 struct nlmsghdr *nlh; 3729 unsigned int flags = portid ? NLM_F_MULTI : 0, event; 3730 3731 event = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK, 3732 IPCTNL_MSG_EXP_GET_STATS_CPU); 3733 nlh = nfnl_msg_put(skb, portid, seq, event, flags, AF_UNSPEC, 3734 NFNETLINK_V0, htons(cpu)); 3735 if (!nlh) 3736 goto nlmsg_failure; 3737 3738 if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) || 3739 nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) || 3740 nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete))) 3741 goto nla_put_failure; 3742 3743 nlmsg_end(skb, nlh); 3744 return skb->len; 3745 3746nla_put_failure: 3747nlmsg_failure: 3748 nlmsg_cancel(skb, nlh); 3749 return -1; 3750} 3751 3752static int 3753ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb) 3754{ 3755 int cpu; 3756 struct net *net = sock_net(skb->sk); 3757 3758 if (cb->args[0] == nr_cpu_ids) 3759 return 0; 3760 3761 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { 3762 const struct ip_conntrack_stat *st; 3763 3764 if (!cpu_possible(cpu)) 3765 continue; 3766 3767 st = per_cpu_ptr(net->ct.stat, cpu); 3768 if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).portid, 3769 cb->nlh->nlmsg_seq, 3770 cpu, st) < 0) 3771 break; 3772 } 3773 cb->args[0] = cpu; 3774 3775 return skb->len; 3776} 3777 3778static int ctnetlink_stat_exp_cpu(struct sk_buff *skb, 3779 const struct nfnl_info *info, 3780 const struct nlattr * const cda[]) 3781{ 3782 if (info->nlh->nlmsg_flags & NLM_F_DUMP) { 3783 struct netlink_dump_control c = { 3784 .dump = ctnetlink_exp_stat_cpu_dump, 3785 }; 3786 return netlink_dump_start(info->sk, skb, info->nlh, &c); 3787 } 3788 3789 return 0; 3790} 3791 3792#ifdef CONFIG_NF_CONNTRACK_EVENTS 3793static struct nf_ct_event_notifier ctnl_notifier = { 3794 .ct_event = ctnetlink_conntrack_event, 3795 .exp_event = ctnetlink_expect_event, 3796}; 3797#endif 3798 3799static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = { 3800 [IPCTNL_MSG_CT_NEW] = { 3801 .call = ctnetlink_new_conntrack, 3802 .type = NFNL_CB_MUTEX, 3803 .attr_count = CTA_MAX, 3804 .policy = ct_nla_policy 3805 }, 3806 [IPCTNL_MSG_CT_GET] = { 3807 .call = ctnetlink_get_conntrack, 3808 .type = NFNL_CB_MUTEX, 3809 .attr_count = CTA_MAX, 3810 .policy = ct_nla_policy 3811 }, 3812 [IPCTNL_MSG_CT_DELETE] = { 3813 .call = ctnetlink_del_conntrack, 3814 .type = NFNL_CB_MUTEX, 3815 .attr_count = CTA_MAX, 3816 .policy = ct_nla_policy 3817 }, 3818 [IPCTNL_MSG_CT_GET_CTRZERO] = { 3819 .call = ctnetlink_get_conntrack, 3820 .type = NFNL_CB_MUTEX, 3821 .attr_count = CTA_MAX, 3822 .policy = ct_nla_policy 3823 }, 3824 [IPCTNL_MSG_CT_GET_STATS_CPU] = { 3825 .call = ctnetlink_stat_ct_cpu, 3826 .type = NFNL_CB_MUTEX, 3827 }, 3828 [IPCTNL_MSG_CT_GET_STATS] = { 3829 .call = ctnetlink_stat_ct, 3830 .type = NFNL_CB_MUTEX, 3831 }, 3832 [IPCTNL_MSG_CT_GET_DYING] = { 3833 .call = ctnetlink_get_ct_dying, 3834 .type = NFNL_CB_MUTEX, 3835 }, 3836 [IPCTNL_MSG_CT_GET_UNCONFIRMED] = { 3837 .call = ctnetlink_get_ct_unconfirmed, 3838 .type = NFNL_CB_MUTEX, 3839 }, 3840}; 3841 3842static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = { 3843 [IPCTNL_MSG_EXP_GET] = { 3844 .call = ctnetlink_get_expect, 3845 .type = NFNL_CB_MUTEX, 3846 .attr_count = CTA_EXPECT_MAX, 3847 
.policy = exp_nla_policy 3848 }, 3849 [IPCTNL_MSG_EXP_NEW] = { 3850 .call = ctnetlink_new_expect, 3851 .type = NFNL_CB_MUTEX, 3852 .attr_count = CTA_EXPECT_MAX, 3853 .policy = exp_nla_policy 3854 }, 3855 [IPCTNL_MSG_EXP_DELETE] = { 3856 .call = ctnetlink_del_expect, 3857 .type = NFNL_CB_MUTEX, 3858 .attr_count = CTA_EXPECT_MAX, 3859 .policy = exp_nla_policy 3860 }, 3861 [IPCTNL_MSG_EXP_GET_STATS_CPU] = { 3862 .call = ctnetlink_stat_exp_cpu, 3863 .type = NFNL_CB_MUTEX, 3864 }, 3865}; 3866 3867static const struct nfnetlink_subsystem ctnl_subsys = { 3868 .name = "conntrack", 3869 .subsys_id = NFNL_SUBSYS_CTNETLINK, 3870 .cb_count = IPCTNL_MSG_MAX, 3871 .cb = ctnl_cb, 3872}; 3873 3874static const struct nfnetlink_subsystem ctnl_exp_subsys = { 3875 .name = "conntrack_expect", 3876 .subsys_id = NFNL_SUBSYS_CTNETLINK_EXP, 3877 .cb_count = IPCTNL_MSG_EXP_MAX, 3878 .cb = ctnl_exp_cb, 3879}; 3880 3881MODULE_ALIAS("ip_conntrack_netlink"); 3882MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK); 3883MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP); 3884 3885static int __net_init ctnetlink_net_init(struct net *net) 3886{ 3887#ifdef CONFIG_NF_CONNTRACK_EVENTS 3888 nf_conntrack_register_notifier(net, &ctnl_notifier); 3889#endif 3890 return 0; 3891} 3892 3893static void ctnetlink_net_pre_exit(struct net *net) 3894{ 3895#ifdef CONFIG_NF_CONNTRACK_EVENTS 3896 nf_conntrack_unregister_notifier(net); 3897#endif 3898} 3899 3900static struct pernet_operations ctnetlink_net_ops = { 3901 .init = ctnetlink_net_init, 3902 .pre_exit = ctnetlink_net_pre_exit, 3903}; 3904 3905static int __init ctnetlink_init(void) 3906{ 3907 int ret; 3908 3909 BUILD_BUG_ON(sizeof(struct ctnetlink_list_dump_ctx) > sizeof_field(struct netlink_callback, ctx)); 3910 3911 ret = nfnetlink_subsys_register(&ctnl_subsys); 3912 if (ret < 0) { 3913 pr_err("ctnetlink_init: cannot register with nfnetlink.\n"); 3914 goto err_out; 3915 } 3916 3917 ret = nfnetlink_subsys_register(&ctnl_exp_subsys); 3918 if (ret < 0) { 3919 pr_err("ctnetlink_init: cannot register exp with nfnetlink.\n"); 3920 goto err_unreg_subsys; 3921 } 3922 3923 ret = register_pernet_subsys(&ctnetlink_net_ops); 3924 if (ret < 0) { 3925 pr_err("ctnetlink_init: cannot register pernet operations\n"); 3926 goto err_unreg_exp_subsys; 3927 } 3928#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 3929 /* setup interaction between nf_queue and nf_conntrack_netlink. */ 3930 RCU_INIT_POINTER(nfnl_ct_hook, &ctnetlink_glue_hook); 3931#endif 3932 return 0; 3933 3934err_unreg_exp_subsys: 3935 nfnetlink_subsys_unregister(&ctnl_exp_subsys); 3936err_unreg_subsys: 3937 nfnetlink_subsys_unregister(&ctnl_subsys); 3938err_out: 3939 return ret; 3940} 3941 3942static void __exit ctnetlink_exit(void) 3943{ 3944 unregister_pernet_subsys(&ctnetlink_net_ops); 3945 nfnetlink_subsys_unregister(&ctnl_exp_subsys); 3946 nfnetlink_subsys_unregister(&ctnl_subsys); 3947#ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT 3948 RCU_INIT_POINTER(nfnl_ct_hook, NULL); 3949#endif 3950 synchronize_rcu(); 3951} 3952 3953module_init(ctnetlink_init); 3954module_exit(ctnetlink_exit);
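
/*
 * Usage sketch (userspace side, not part of this kernel module): the dump
 * path implemented above (IPCTNL_MSG_CT_GET with NLM_F_DUMP, served by
 * ctnetlink_get_conntrack and the table dump callbacks) can be exercised
 * with a small libmnl program. This is a minimal, hypothetical example for
 * illustration only -- it assumes libmnl is installed and it skips
 * attribute parsing and most error handling.
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <sys/socket.h>
 *	#include <libmnl/libmnl.h>
 *	#include <linux/netfilter/nfnetlink.h>
 *	#include <linux/netfilter/nfnetlink_conntrack.h>
 *
 *	static int cb(const struct nlmsghdr *nlh, void *data)
 *	{
 *		// One message per conntrack entry; the nested CTA_* attributes
 *		// would normally be walked here with mnl_attr_parse().
 *		printf("conntrack message, len=%u\n", nlh->nlmsg_len);
 *		return MNL_CB_OK;
 *	}
 *
 *	int main(void)
 *	{
 *		char buf[MNL_SOCKET_BUFFER_SIZE];
 *		struct mnl_socket *nl = mnl_socket_open(NETLINK_NETFILTER);
 *		struct nlmsghdr *nlh;
 *		struct nfgenmsg *nfh;
 *		unsigned int seq, portid;
 *		int ret;
 *
 *		if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
 *			return 1;
 *		portid = mnl_socket_get_portid(nl);
 *
 *		// Dump request: subsystem CTNETLINK, message CT_GET, all families.
 *		nlh = mnl_nlmsg_put_header(buf);
 *		nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_GET;
 *		nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
 *		nlh->nlmsg_seq = seq = time(NULL);
 *
 *		nfh = mnl_nlmsg_put_extra_header(nlh, sizeof(struct nfgenmsg));
 *		nfh->nfgen_family = AF_UNSPEC;
 *		nfh->version = NFNETLINK_V0;
 *		nfh->res_id = 0;
 *
 *		if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
 *			return 1;
 *
 *		ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
 *		while (ret > 0) {
 *			ret = mnl_cb_run(buf, ret, seq, portid, cb, NULL);
 *			if (ret <= MNL_CB_STOP)
 *				break;
 *			ret = mnl_socket_recvfrom(nl, buf, sizeof(buf));
 *		}
 *		mnl_socket_close(nl);
 *		return 0;
 *	}
 */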