nfnetlink_log.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is a module which is used for logging packets to userspace via
 * nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ipt_ULOG.c:
 * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_log.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/netfilter/nf_log.h>
#include <net/netns/generic.h>

#include <linux/atomic.h>
#include <linux/refcount.h>

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

#define NFULNL_COPY_DISABLED	0xff
#define NFULNL_NLBUFSIZ_DEFAULT	NLMSG_GOODSIZE
#define NFULNL_TIMEOUT_DEFAULT	100	/* every second */
#define NFULNL_QTHRESH_DEFAULT	100	/* 100 packets */
/* max packet size is limited by 16-bit struct nfattr nfa_len field */
#define NFULNL_COPY_RANGE_MAX	(0xFFFF - NLA_HDRLEN)

#define PRINTR(x, args...)	do { if (net_ratelimit()) \
				     printk(x, ## args); } while (0);

struct nfulnl_instance {
	struct hlist_node hlist;	/* global list of instances */
	spinlock_t lock;
	refcount_t use;			/* use count */

	unsigned int qlen;		/* number of nlmsgs in skb */
	struct sk_buff *skb;		/* pre-allocated skb */
	struct timer_list timer;
	struct net *net;
	netns_tracker ns_tracker;
	struct user_namespace *peer_user_ns;	/* User namespace of the peer process */
	u32 peer_portid;		/* PORTID of the peer process */

	/* configurable parameters */
	unsigned int flushtimeout;	/* timeout until queue flush */
	unsigned int nlbufsiz;		/* netlink buffer allocation size */
	unsigned int qthreshold;	/* threshold of the queue */
	u_int32_t copy_range;
	u_int32_t seq;			/* instance-local sequential counter */
	u_int16_t group_num;		/* number of this queue */
	u_int16_t flags;
	u_int8_t copy_mode;
	struct rcu_head rcu;
};

#define INSTANCE_BUCKETS	16

static unsigned int nfnl_log_net_id __read_mostly;

struct nfnl_log_net {
	spinlock_t instances_lock;
	struct hlist_head instance_table[INSTANCE_BUCKETS];
	atomic_t global_seq;
};

static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
{
	return net_generic(net, nfnl_log_net_id);
}

static inline u_int8_t instance_hashfn(u_int16_t group_num)
{
	return ((group_num & 0xff) % INSTANCE_BUCKETS);
}

static struct nfulnl_instance *
__instance_lookup(struct nfnl_log_net *log, u_int16_t group_num)
{
	struct hlist_head *head;
	struct nfulnl_instance *inst;

	head = &log->instance_table[instance_hashfn(group_num)];
	hlist_for_each_entry_rcu(inst, head, hlist) {
		if (inst->group_num == group_num)
			return inst;
	}
	return NULL;
}

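/* Instance lifetime: lookups take a reference under rcu_read_lock_bh() via
 * refcount_inc_not_zero(), so a concurrent destroy cannot be resurrected;
 * the final instance_put() defers the actual free to call_rcu() (see
 * nfulnl_instance_free_rcu() below) once all lockless readers are done.
 */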
static inline void
instance_get(struct nfulnl_instance *inst)
{
	refcount_inc(&inst->use);
}

static struct nfulnl_instance *
instance_lookup_get(struct nfnl_log_net *log, u_int16_t group_num)
{
	struct nfulnl_instance *inst;

	rcu_read_lock_bh();
	inst = __instance_lookup(log, group_num);
	if (inst && !refcount_inc_not_zero(&inst->use))
		inst = NULL;
	rcu_read_unlock_bh();

	return inst;
}

static void nfulnl_instance_free_rcu(struct rcu_head *head)
{
	struct nfulnl_instance *inst =
		container_of(head, struct nfulnl_instance, rcu);

	put_net_track(inst->net, &inst->ns_tracker);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
instance_put(struct nfulnl_instance *inst)
{
	if (inst && refcount_dec_and_test(&inst->use))
		call_rcu(&inst->rcu, nfulnl_instance_free_rcu);
}

static void nfulnl_timer(struct timer_list *t);

static struct nfulnl_instance *
instance_create(struct net *net, u_int16_t group_num,
		u32 portid, struct user_namespace *user_ns)
{
	struct nfulnl_instance *inst;
	struct nfnl_log_net *log = nfnl_log_pernet(net);
	int err;

	spin_lock_bh(&log->instances_lock);
	if (__instance_lookup(log, group_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	if (!try_module_get(THIS_MODULE)) {
		kfree(inst);
		err = -EAGAIN;
		goto out_unlock;
	}

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	refcount_set(&inst->use, 2);

	timer_setup(&inst->timer, nfulnl_timer, 0);

	inst->net = get_net_track(net, &inst->ns_tracker, GFP_ATOMIC);
	inst->peer_user_ns = user_ns;
	inst->peer_portid = portid;
	inst->group_num = group_num;

	inst->qthreshold	= NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout	= NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz		= NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode		= NFULNL_COPY_PACKET;
	inst->copy_range	= NFULNL_COPY_RANGE_MAX;

	hlist_add_head_rcu(&inst->hlist,
			   &log->instance_table[instance_hashfn(group_num)]);

	spin_unlock_bh(&log->instances_lock);

	return inst;

out_unlock:
	spin_unlock_bh(&log->instances_lock);
	return ERR_PTR(err);
}

static void __nfulnl_flush(struct nfulnl_instance *inst);

/* called with BH disabled */
static void
__instance_destroy(struct nfulnl_instance *inst)
{
	/* first pull it out of the global list */
	hlist_del_rcu(&inst->hlist);

	/* then flush all pending packets from skb */

	spin_lock(&inst->lock);

	/* lockless readers won't be able to use us */
	inst->copy_mode = NFULNL_COPY_DISABLED;

	if (inst->skb)
		__nfulnl_flush(inst);
	spin_unlock(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);
}

static inline void
instance_destroy(struct nfnl_log_net *log,
		 struct nfulnl_instance *inst)
{
	spin_lock_bh(&log->instances_lock);
	__instance_destroy(inst);
	spin_unlock_bh(&log->instances_lock);
}

static int
nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
		unsigned int range)
{
	int status = 0;

	spin_lock_bh(&inst->lock);

	switch (mode) {
	case NFULNL_COPY_NONE:
	case NFULNL_COPY_META:
		inst->copy_mode = mode;
		inst->copy_range = 0;
		break;

mode; 265 if (range == 0) 266 range = NFULNL_COPY_RANGE_MAX; 267 inst->copy_range = min_t(unsigned int, 268 range, NFULNL_COPY_RANGE_MAX); 269 break; 270 271 default: 272 status = -EINVAL; 273 break; 274 } 275 276 spin_unlock_bh(&inst->lock); 277 278 return status; 279} 280 281static int 282nfulnl_set_nlbufsiz(struct nfulnl_instance *inst, u_int32_t nlbufsiz) 283{ 284 int status; 285 286 spin_lock_bh(&inst->lock); 287 if (nlbufsiz < NFULNL_NLBUFSIZ_DEFAULT) 288 status = -ERANGE; 289 else if (nlbufsiz > 131072) 290 status = -ERANGE; 291 else { 292 inst->nlbufsiz = nlbufsiz; 293 status = 0; 294 } 295 spin_unlock_bh(&inst->lock); 296 297 return status; 298} 299 300static void 301nfulnl_set_timeout(struct nfulnl_instance *inst, u_int32_t timeout) 302{ 303 spin_lock_bh(&inst->lock); 304 inst->flushtimeout = timeout; 305 spin_unlock_bh(&inst->lock); 306} 307 308static void 309nfulnl_set_qthresh(struct nfulnl_instance *inst, u_int32_t qthresh) 310{ 311 spin_lock_bh(&inst->lock); 312 inst->qthreshold = qthresh; 313 spin_unlock_bh(&inst->lock); 314} 315 316static int 317nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags) 318{ 319 spin_lock_bh(&inst->lock); 320 inst->flags = flags; 321 spin_unlock_bh(&inst->lock); 322 323 return 0; 324} 325 326static struct sk_buff * 327nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size, 328 unsigned int pkt_size) 329{ 330 struct sk_buff *skb; 331 unsigned int n; 332 333 /* alloc skb which should be big enough for a whole multipart 334 * message. WARNING: has to be <= 128k due to slab restrictions */ 335 336 n = max(inst_size, pkt_size); 337 skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN); 338 if (!skb) { 339 if (n > pkt_size) { 340 /* try to allocate only as much as we need for current 341 * packet */ 342 343 skb = alloc_skb(pkt_size, GFP_ATOMIC); 344 } 345 } 346 347 return skb; 348} 349 350static void 351__nfulnl_send(struct nfulnl_instance *inst) 352{ 353 if (inst->qlen > 1) { 354 struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0, 355 NLMSG_DONE, 356 sizeof(struct nfgenmsg), 357 0); 358 if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n", 359 inst->skb->len, skb_tailroom(inst->skb))) { 360 kfree_skb(inst->skb); 361 goto out; 362 } 363 } 364 nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid); 365out: 366 inst->qlen = 0; 367 inst->skb = NULL; 368} 369 370static void 371__nfulnl_flush(struct nfulnl_instance *inst) 372{ 373 /* timer holds a reference */ 374 if (del_timer(&inst->timer)) 375 instance_put(inst); 376 if (inst->skb) 377 __nfulnl_send(inst); 378} 379 380static void 381nfulnl_timer(struct timer_list *t) 382{ 383 struct nfulnl_instance *inst = from_timer(inst, t, timer); 384 385 spin_lock_bh(&inst->lock); 386 if (inst->skb) 387 __nfulnl_send(inst); 388 spin_unlock_bh(&inst->lock); 389 instance_put(inst); 390} 391 392static u32 nfulnl_get_bridge_size(const struct sk_buff *skb) 393{ 394 u32 size = 0; 395 396 if (!skb_mac_header_was_set(skb)) 397 return 0; 398 399 if (skb_vlan_tag_present(skb)) { 400 size += nla_total_size(0); /* nested */ 401 size += nla_total_size(sizeof(u16)); /* id */ 402 size += nla_total_size(sizeof(u16)); /* tag */ 403 } 404 405 if (skb->network_header > skb->mac_header) 406 size += nla_total_size(skb->network_header - skb->mac_header); 407 408 return size; 409} 410 411static int nfulnl_put_bridge(struct nfulnl_instance *inst, const struct sk_buff *skb) 412{ 413 if (!skb_mac_header_was_set(skb)) 414 return 0; 415 416 if (skb_vlan_tag_present(skb)) { 417 struct nlattr *nest; 418 419 nest = 
static int nfulnl_put_bridge(struct nfulnl_instance *inst, const struct sk_buff *skb)
{
	if (!skb_mac_header_was_set(skb))
		return 0;

	if (skb_vlan_tag_present(skb)) {
		struct nlattr *nest;

		nest = nla_nest_start(inst->skb, NFULA_VLAN);
		if (!nest)
			goto nla_put_failure;

		if (nla_put_be16(inst->skb, NFULA_VLAN_TCI, htons(skb->vlan_tci)) ||
		    nla_put_be16(inst->skb, NFULA_VLAN_PROTO, skb->vlan_proto))
			goto nla_put_failure;

		nla_nest_end(inst->skb, nest);
	}

	if (skb->mac_header < skb->network_header) {
		int len = (int)(skb->network_header - skb->mac_header);

		if (nla_put(inst->skb, NFULA_L2HDR, len, skb_mac_header(skb)))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -1;
}

/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfnl_log_net *log,
			struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen,
			const struct nfnl_ct_hook *nfnl_ct,
			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	sk_buff_data_t old_tail = inst->skb->tail;
	struct sock *sk;
	const unsigned char *hwhdrp;
	ktime_t tstamp;

	nlh = nfnl_msg_put(inst->skb, 0, 0,
			   nfnl_msg_type(NFNL_SUBSYS_ULOG, NFULNL_MSG_PACKET),
			   0, pf, NFNETLINK_V0, htons(inst->group_num));
	if (!nlh)
		return -1;

	memset(&pmsg, 0, sizeof(pmsg));
	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
		goto nla_put_failure;

	if (prefix &&
	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
		goto nla_put_failure;

	if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
				 htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: indev is the physical input device, we need
			 * to look for the bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_thresh or
			 * nf_log_packet.
			 */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			struct net_device *physindev;

			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;

			physindev = nf_bridge_get_physindev(skb);
			if (physindev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
				 htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is the physical output device, we
			 * need to look for the bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_thresh or
			 * nf_log_packet.
			 */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			struct net_device *physoutdev;

			/* Case 2: outdev is the bridge group, we need to look
			 * for the physical output device (when called from
			 * ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;

			physoutdev = nf_bridge_get_physoutdev(skb);
			if (physoutdev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (skb->mark &&
	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
		goto nla_put_failure;

	if (indev && skb->dev &&
	    skb_mac_header_was_set(skb) &&
	    skb_mac_header_len(skb) != 0) {
		struct nfulnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
		    nla_put_be16(inst->skb, NFULA_HWLEN,
				 htons(skb->dev->hard_header_len)))
			goto nla_put_failure;

		hwhdrp = skb_mac_header(skb);

		if (skb->dev->type == ARPHRD_SIT)
			hwhdrp -= ETH_HLEN;

		if (hwhdrp >= skb->head &&
		    nla_put(inst->skb, NFULA_HWHEADER,
			    skb->dev->hard_header_len, hwhdrp))
			goto nla_put_failure;
	}

	tstamp = skb_tstamp_cond(skb, false);
	if (hooknum <= NF_INET_FORWARD && tstamp) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timespec64 kts = ktime_to_timespec64(tstamp);
		ts.sec = cpu_to_be64(kts.tv_sec);
		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);

		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	/* UID */
	sk = skb->sk;
	if (sk && sk_fullsock(sk)) {
		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket && sk->sk_socket->file) {
			struct file *file = sk->sk_socket->file;
			const struct cred *cred = file->f_cred;
			struct user_namespace *user_ns = inst->peer_user_ns;
			__be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
			__be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
			read_unlock_bh(&sk->sk_callback_lock);
			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
			    nla_put_be32(inst->skb, NFULA_GID, gid))
				goto nla_put_failure;
		} else
			read_unlock_bh(&sk->sk_callback_lock);
	}

	/* local sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
		goto nla_put_failure;

	/* global sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
			 htonl(atomic_inc_return(&log->global_seq))))
		goto nla_put_failure;

	if (ct && nfnl_ct->build(inst->skb, ct, ctinfo,
				 NFULA_CT, NFULA_CT_INFO) < 0)
		goto nla_put_failure;

	if ((pf == NFPROTO_NETDEV || pf == NFPROTO_BRIDGE) &&
	    nfulnl_put_bridge(inst, skb) < 0)
		goto nla_put_failure;

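	/* The packet payload is appended last.  nfulnl_log_packet() sized the
	 * skb to hold this attribute, but tailroom is re-checked here in case
	 * the earlier attributes consumed more space than estimated.
	 */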
	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len))
			goto nla_put_failure;

		nla = skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}

static const struct nf_loginfo default_loginfo = {
	.type =		NF_LOG_TYPE_ULOG,
	.u = {
		.ulog = {
			.copy_len	= 0xffff,
			.group		= 0,
			.qthreshold	= 1,
		},
	},
};

/* log handler for internal netfilter logging api */
static void
nfulnl_log_packet(struct net *net,
		  u_int8_t pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	size_t size;
	unsigned int data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int plen = 0;
	struct nfnl_log_net *log = nfnl_log_pernet(net);
	const struct nfnl_ct_hook *nfnl_ct = NULL;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info ctinfo;

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(log, li->u.ulog.group);
	if (!inst)
		return;

	if (prefix)
		plen = strlen(prefix) + 1;

	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size = nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* uid */
		+ nla_total_size(sizeof(u_int32_t))	/* gid */
		+ nla_total_size(plen)			/* prefix */
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
		+ nla_total_size(sizeof(struct nfgenmsg));	/* NLMSG_DONE */

	if (in && skb_mac_header_was_set(skb)) {
		size += nla_total_size(skb->dev->hard_header_len)
			+ nla_total_size(sizeof(u_int16_t))	/* hwtype */
			+ nla_total_size(sizeof(u_int16_t));	/* hwlen */
	}

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += nla_total_size(sizeof(u_int32_t));
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	if (inst->flags & NFULNL_CFG_F_CONNTRACK) {
		nfnl_ct = rcu_dereference(nfnl_ct_hook);
		if (nfnl_ct != NULL) {
			ct = nf_ct_get(skb, &ctinfo);
			if (ct != NULL)
				size += nfnl_ct->build_size(ct);
		}
	}
#endif
	if (pf == NFPROTO_NETDEV || pf == NFPROTO_BRIDGE)
		size += nfulnl_get_bridge_size(skb);

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (li->u.ulog.qthreshold)
		if (qthreshold > li->u.ulog.qthreshold)
			qthreshold = li->u.ulog.qthreshold;

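	/* Determine how much packet payload will be copied to userspace: the
	 * instance-wide copy_range, optionally reduced by a per-rule copy
	 * length (NF_LOG_F_COPY_LEN), and never more than the packet itself.
	 */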
	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;

	case NFULNL_COPY_PACKET:
		data_len = inst->copy_range;
		if ((li->u.ulog.flags & NF_LOG_F_COPY_LEN) &&
		    (li->u.ulog.copy_len < data_len))
			data_len = li->u.ulog.copy_len;

		if (data_len > skb->len)
			data_len = skb->len;

		size += nla_total_size(data_len);
		break;

	case NFULNL_COPY_DISABLED:
	default:
		goto unlock_and_release;
	}

	if (inst->skb && size > skb_tailroom(inst->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		__nfulnl_flush(inst);
	}

	if (!inst->skb) {
		inst->skb = nfulnl_alloc_skb(net, inst->peer_portid,
					     inst->nlbufsiz, size);
		if (!inst->skb)
			goto alloc_failure;
	}

	inst->qlen++;

	__build_packet_message(log, inst, skb, data_len, pf,
				hooknum, in, out, prefix, plen,
				nfnl_ct, ct, ctinfo);

	if (inst->qlen >= qthreshold)
		__nfulnl_flush(inst);
	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	else if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}

unlock_and_release:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	return;

alloc_failure:
	/* FIXME: statistics */
	goto unlock_and_release;
}

static int
nfulnl_rcv_nl_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;
	struct nfnl_log_net *log = nfnl_log_pernet(n->net);

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this portid */
		spin_lock_bh(&log->instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *t2;
			struct nfulnl_instance *inst;
			struct hlist_head *head = &log->instance_table[i];

			hlist_for_each_entry_safe(inst, t2, head, hlist) {
				if (n->portid == inst->peer_portid)
					__instance_destroy(inst);
			}
		}
		spin_unlock_bh(&log->instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfulnl_rtnl_notifier = {
	.notifier_call	= nfulnl_rcv_nl_event,
};

static int nfulnl_recv_unsupp(struct sk_buff *skb, const struct nfnl_info *info,
			      const struct nlattr * const nfula[])
{
	return -ENOTSUPP;
}

static struct nf_logger nfulnl_logger __read_mostly = {
	.name	= "nfnetlink_log",
	.type	= NF_LOG_TYPE_ULOG,
	.logfn	= nfulnl_log_packet,
	.me	= THIS_MODULE,
};

static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = {
	[NFULA_CFG_CMD]		= { .len = sizeof(struct nfulnl_msg_config_cmd) },
	[NFULA_CFG_MODE]	= { .len = sizeof(struct nfulnl_msg_config_mode) },
	[NFULA_CFG_TIMEOUT]	= { .type = NLA_U32 },
	[NFULA_CFG_QTHRESH]	= { .type = NLA_U32 },
	[NFULA_CFG_NLBUFSIZ]	= { .type = NLA_U32 },
	[NFULA_CFG_FLAGS]	= { .type = NLA_U16 },
};

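/* NFULNL_MSG_CONFIG handler: binds/unbinds a logging group to the sending
 * netlink socket (or a whole protocol family to this logger) and updates the
 * per-instance parameters (copy mode/range, flush timeout, buffer size, queue
 * threshold, flags).  Only the socket that bound a group may reconfigure it.
 */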
static int nfulnl_recv_config(struct sk_buff *skb, const struct nfnl_info *info,
			      const struct nlattr * const nfula[])
{
	struct nfnl_log_net *log = nfnl_log_pernet(info->net);
	u_int16_t group_num = ntohs(info->nfmsg->res_id);
	struct nfulnl_msg_config_cmd *cmd = NULL;
	struct nfulnl_instance *inst;
	u16 flags = 0;
	int ret = 0;

	if (nfula[NFULA_CFG_CMD]) {
		u_int8_t pf = info->nfmsg->nfgen_family;
		cmd = nla_data(nfula[NFULA_CFG_CMD]);

		/* Commands without queue context */
		switch (cmd->command) {
		case NFULNL_CFG_CMD_PF_BIND:
			return nf_log_bind_pf(info->net, pf, &nfulnl_logger);
		case NFULNL_CFG_CMD_PF_UNBIND:
			nf_log_unbind_pf(info->net, pf);
			return 0;
		}
	}

	inst = instance_lookup_get(log, group_num);
	if (inst && inst->peer_portid != NETLINK_CB(skb).portid) {
		ret = -EPERM;
		goto out_put;
	}

	/* Check if we support these flags in the first place; the
	 * dependencies must be present as well so we don't break atomicity.
	 */
	if (nfula[NFULA_CFG_FLAGS]) {
		flags = ntohs(nla_get_be16(nfula[NFULA_CFG_FLAGS]));

		if ((flags & NFULNL_CFG_F_CONNTRACK) &&
		    !rcu_access_pointer(nfnl_ct_hook)) {
#ifdef CONFIG_MODULES
			nfnl_unlock(NFNL_SUBSYS_ULOG);
			request_module("ip_conntrack_netlink");
			nfnl_lock(NFNL_SUBSYS_ULOG);
			if (rcu_access_pointer(nfnl_ct_hook)) {
				ret = -EAGAIN;
				goto out_put;
			}
#endif
			ret = -EOPNOTSUPP;
			goto out_put;
		}
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFULNL_CFG_CMD_BIND:
			if (inst) {
				ret = -EBUSY;
				goto out_put;
			}

			inst = instance_create(info->net, group_num,
					       NETLINK_CB(skb).portid,
					       sk_user_ns(NETLINK_CB(skb).sk));
			if (IS_ERR(inst)) {
				ret = PTR_ERR(inst);
				goto out;
			}
			break;
		case NFULNL_CFG_CMD_UNBIND:
			if (!inst) {
				ret = -ENODEV;
				goto out;
			}

			instance_destroy(log, inst);
			goto out_put;
		default:
			ret = -ENOTSUPP;
			goto out_put;
		}
	} else if (!inst) {
		ret = -ENODEV;
		goto out;
	}

	if (nfula[NFULA_CFG_MODE]) {
		struct nfulnl_msg_config_mode *params =
			nla_data(nfula[NFULA_CFG_MODE]);

		nfulnl_set_mode(inst, params->copy_mode,
				ntohl(params->copy_range));
	}

	if (nfula[NFULA_CFG_TIMEOUT]) {
		__be32 timeout = nla_get_be32(nfula[NFULA_CFG_TIMEOUT]);

		nfulnl_set_timeout(inst, ntohl(timeout));
	}

	if (nfula[NFULA_CFG_NLBUFSIZ]) {
		__be32 nlbufsiz = nla_get_be32(nfula[NFULA_CFG_NLBUFSIZ]);

		nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
	}

	if (nfula[NFULA_CFG_QTHRESH]) {
		__be32 qthresh = nla_get_be32(nfula[NFULA_CFG_QTHRESH]);

		nfulnl_set_qthresh(inst, ntohl(qthresh));
	}

	if (nfula[NFULA_CFG_FLAGS])
		nfulnl_set_flags(inst, flags);

out_put:
	instance_put(inst);
out:
	return ret;
}

static const struct nfnl_callback nfulnl_cb[NFULNL_MSG_MAX] = {
	[NFULNL_MSG_PACKET]	= {
		.call		= nfulnl_recv_unsupp,
		.type		= NFNL_CB_MUTEX,
		.attr_count	= NFULA_MAX,
	},
	[NFULNL_MSG_CONFIG]	= {
		.call		= nfulnl_recv_config,
		.type		= NFNL_CB_MUTEX,
		.attr_count	= NFULA_CFG_MAX,
		.policy		= nfula_cfg_policy
	},
};

static const struct nfnetlink_subsystem nfulnl_subsys = {
	.name		= "log",
	.subsys_id	= NFNL_SUBSYS_ULOG,
	.cb_count	= NFULNL_MSG_MAX,
	.cb		= nfulnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *get_first(struct net *net, struct iter_state *st)
{
	struct nfnl_log_net *log;
	if (!st)
		return NULL;

	log = nfnl_log_pernet(net);

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		struct hlist_head *head = &log->instance_table[st->bucket];

		if (!hlist_empty(head))
			return rcu_dereference_bh(hlist_first_rcu(head));
	}
	return NULL;
}

static struct hlist_node *get_next(struct net *net, struct iter_state *st,
				   struct hlist_node *h)
{
	h = rcu_dereference_bh(hlist_next_rcu(h));
	while (!h) {
		struct nfnl_log_net *log;
		struct hlist_head *head;

		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		log = nfnl_log_pernet(net);
		head = &log->instance_table[st->bucket];
		h = rcu_dereference_bh(hlist_first_rcu(head));
	}
	return h;
}

static struct hlist_node *get_idx(struct net *net, struct iter_state *st,
				  loff_t pos)
{
	struct hlist_node *head;
	head = get_first(net, st);

	if (head)
		while (pos && (head = get_next(net, st, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *s, loff_t *pos)
	__acquires(rcu_bh)
{
	rcu_read_lock_bh();
	return get_idx(seq_file_net(s), s->private, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(seq_file_net(s), s->private, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(rcu_bh)
{
	rcu_read_unlock_bh();
}

static int seq_show(struct seq_file *s, void *v)
{
	const struct nfulnl_instance *inst = v;

	seq_printf(s, "%5u %6u %5u %1u %5u %6u %2u\n",
		   inst->group_num,
		   inst->peer_portid, inst->qlen,
		   inst->copy_mode, inst->copy_range,
		   inst->flushtimeout, refcount_read(&inst->use));

	return 0;
}

static const struct seq_operations nful_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};
#endif /* PROC_FS */

static int __net_init nfnl_log_net_init(struct net *net)
{
	unsigned int i;
	struct nfnl_log_net *log = nfnl_log_pernet(net);
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&log->instance_table[i]);
	spin_lock_init(&log->instances_lock);

#ifdef CONFIG_PROC_FS
	proc = proc_create_net("nfnetlink_log", 0440, net->nf.proc_netfilter,
			       &nful_seq_ops, sizeof(struct iter_state));
	if (!proc)
		return -ENOMEM;

	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif
	return 0;
}

static void __net_exit nfnl_log_net_exit(struct net *net)
{
	struct nfnl_log_net *log = nfnl_log_pernet(net);
	unsigned int i;

#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
#endif
	nf_log_unset(net, &nfulnl_logger);
	for (i = 0; i < INSTANCE_BUCKETS; i++)
		WARN_ON_ONCE(!hlist_empty(&log->instance_table[i]));
}

static struct pernet_operations nfnl_log_net_ops = {
	.init	= nfnl_log_net_init,
	.exit	= nfnl_log_net_exit,
	.id	= &nfnl_log_net_id,
	.size	= sizeof(struct nfnl_log_net),
};

static int __init nfnetlink_log_init(void)
{
	int status;

	status = register_pernet_subsys(&nfnl_log_net_ops);
	if (status < 0) {
		pr_err("failed to register pernet ops\n");
		goto out;
	}

	netlink_register_notifier(&nfulnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfulnl_subsys);
	if (status < 0) {
		pr_err("failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

	status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
	if (status < 0) {
		pr_err("failed to register logger\n");
		goto cleanup_subsys;
	}

	return status;

cleanup_subsys:
	nfnetlink_subsys_unregister(&nfulnl_subsys);
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_log_net_ops);
out:
	return status;
}

static void __exit nfnetlink_log_fini(void)
{
	nfnetlink_subsys_unregister(&nfulnl_subsys);
	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
	unregister_pernet_subsys(&nfnl_log_net_ops);
	nf_log_unregister(&nfulnl_logger);
}

MODULE_DESCRIPTION("netfilter userspace logging");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
MODULE_ALIAS_NF_LOGGER(AF_INET, 1);
MODULE_ALIAS_NF_LOGGER(AF_INET6, 1);
MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 1);
MODULE_ALIAS_NF_LOGGER(3, 1); /* NFPROTO_ARP */
MODULE_ALIAS_NF_LOGGER(5, 1); /* NFPROTO_NETDEV */

module_init(nfnetlink_log_init);
module_exit(nfnetlink_log_fini);