ebtables.c (66747B)
1// SPDX-License-Identifier: GPL-2.0-or-later 2/* 3 * ebtables 4 * 5 * Author: 6 * Bart De Schuymer <bdschuym@pandora.be> 7 * 8 * ebtables.c,v 2.0, July, 2002 9 * 10 * This code is strongly inspired by the iptables code which is 11 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling 12 */ 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14#include <linux/kmod.h> 15#include <linux/module.h> 16#include <linux/vmalloc.h> 17#include <linux/netfilter/x_tables.h> 18#include <linux/netfilter_bridge/ebtables.h> 19#include <linux/spinlock.h> 20#include <linux/mutex.h> 21#include <linux/slab.h> 22#include <linux/uaccess.h> 23#include <linux/smp.h> 24#include <linux/cpumask.h> 25#include <linux/audit.h> 26#include <net/sock.h> 27#include <net/netns/generic.h> 28/* needed for logical [in,out]-dev filtering */ 29#include "../br_private.h" 30 31/* Each cpu has its own set of counters, so there is no need for write_lock in 32 * the softirq 33 * For reading or updating the counters, the user context needs to 34 * get a write_lock 35 */ 36 37/* The size of each set of counters is altered to get cache alignment */ 38#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) 39#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter))) 40#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \ 41 COUNTER_OFFSET(n) * cpu)) 42 43struct ebt_pernet { 44 struct list_head tables; 45}; 46 47struct ebt_template { 48 struct list_head list; 49 char name[EBT_TABLE_MAXNAMELEN]; 50 struct module *owner; 51 /* called when table is needed in the given netns */ 52 int (*table_init)(struct net *net); 53}; 54 55static unsigned int ebt_pernet_id __read_mostly; 56static LIST_HEAD(template_tables); 57static DEFINE_MUTEX(ebt_mutex); 58 59#ifdef CONFIG_NETFILTER_XTABLES_COMPAT 60static void ebt_standard_compat_from_user(void *dst, const void *src) 61{ 62 int v = *(compat_int_t *)src; 63 64 if (v >= 0) 65 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v); 66 memcpy(dst, &v, sizeof(v)); 67} 68 69static int ebt_standard_compat_to_user(void __user *dst, const void *src) 70{ 71 compat_int_t cv = *(int *)src; 72 73 if (cv >= 0) 74 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv); 75 return copy_to_user(dst, &cv, sizeof(cv)) ? 
-EFAULT : 0; 76} 77#endif 78 79 80static struct xt_target ebt_standard_target = { 81 .name = "standard", 82 .revision = 0, 83 .family = NFPROTO_BRIDGE, 84 .targetsize = sizeof(int), 85#ifdef CONFIG_NETFILTER_XTABLES_COMPAT 86 .compatsize = sizeof(compat_int_t), 87 .compat_from_user = ebt_standard_compat_from_user, 88 .compat_to_user = ebt_standard_compat_to_user, 89#endif 90}; 91 92static inline int 93ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb, 94 struct xt_action_param *par) 95{ 96 par->target = w->u.watcher; 97 par->targinfo = w->data; 98 w->u.watcher->target(skb, par); 99 /* watchers don't give a verdict */ 100 return 0; 101} 102 103static inline int 104ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb, 105 struct xt_action_param *par) 106{ 107 par->match = m->u.match; 108 par->matchinfo = m->data; 109 return !m->u.match->match(skb, par); 110} 111 112static inline int 113ebt_dev_check(const char *entry, const struct net_device *device) 114{ 115 int i = 0; 116 const char *devname; 117 118 if (*entry == '\0') 119 return 0; 120 if (!device) 121 return 1; 122 devname = device->name; 123 /* 1 is the wildcard token */ 124 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i]) 125 i++; 126 return devname[i] != entry[i] && entry[i] != 1; 127} 128 129/* process standard matches */ 130static inline int 131ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb, 132 const struct net_device *in, const struct net_device *out) 133{ 134 const struct ethhdr *h = eth_hdr(skb); 135 const struct net_bridge_port *p; 136 __be16 ethproto; 137 138 if (skb_vlan_tag_present(skb)) 139 ethproto = htons(ETH_P_8021Q); 140 else 141 ethproto = h->h_proto; 142 143 if (e->bitmask & EBT_802_3) { 144 if (NF_INVF(e, EBT_IPROTO, eth_proto_is_802_3(ethproto))) 145 return 1; 146 } else if (!(e->bitmask & EBT_NOPROTO) && 147 NF_INVF(e, EBT_IPROTO, e->ethproto != ethproto)) 148 return 1; 149 150 if (NF_INVF(e, EBT_IIN, ebt_dev_check(e->in, in))) 151 return 1; 152 if (NF_INVF(e, EBT_IOUT, ebt_dev_check(e->out, out))) 153 return 1; 154 /* rcu_read_lock()ed by nf_hook_thresh */ 155 if (in && (p = br_port_get_rcu(in)) != NULL && 156 NF_INVF(e, EBT_ILOGICALIN, 157 ebt_dev_check(e->logical_in, p->br->dev))) 158 return 1; 159 if (out && (p = br_port_get_rcu(out)) != NULL && 160 NF_INVF(e, EBT_ILOGICALOUT, 161 ebt_dev_check(e->logical_out, p->br->dev))) 162 return 1; 163 164 if (e->bitmask & EBT_SOURCEMAC) { 165 if (NF_INVF(e, EBT_ISOURCE, 166 !ether_addr_equal_masked(h->h_source, e->sourcemac, 167 e->sourcemsk))) 168 return 1; 169 } 170 if (e->bitmask & EBT_DESTMAC) { 171 if (NF_INVF(e, EBT_IDEST, 172 !ether_addr_equal_masked(h->h_dest, e->destmac, 173 e->destmsk))) 174 return 1; 175 } 176 return 0; 177} 178 179static inline 180struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry) 181{ 182 return (void *)entry + entry->next_offset; 183} 184 185static inline const struct ebt_entry_target * 186ebt_get_target_c(const struct ebt_entry *e) 187{ 188 return ebt_get_target((struct ebt_entry *)e); 189} 190 191/* Do some firewalling */ 192unsigned int ebt_do_table(void *priv, struct sk_buff *skb, 193 const struct nf_hook_state *state) 194{ 195 struct ebt_table *table = priv; 196 unsigned int hook = state->hook; 197 int i, nentries; 198 struct ebt_entry *point; 199 struct ebt_counter *counter_base, *cb_base; 200 const struct ebt_entry_target *t; 201 int verdict, sp = 0; 202 struct ebt_chainstack *cs; 203 struct ebt_entries *chaininfo; 204 const char *base; 205 const 
struct ebt_table_info *private; 206 struct xt_action_param acpar; 207 208 acpar.state = state; 209 acpar.hotdrop = false; 210 211 read_lock_bh(&table->lock); 212 private = table->private; 213 cb_base = COUNTER_BASE(private->counters, private->nentries, 214 smp_processor_id()); 215 if (private->chainstack) 216 cs = private->chainstack[smp_processor_id()]; 217 else 218 cs = NULL; 219 chaininfo = private->hook_entry[hook]; 220 nentries = private->hook_entry[hook]->nentries; 221 point = (struct ebt_entry *)(private->hook_entry[hook]->data); 222 counter_base = cb_base + private->hook_entry[hook]->counter_offset; 223 /* base for chain jumps */ 224 base = private->entries; 225 i = 0; 226 while (i < nentries) { 227 if (ebt_basic_match(point, skb, state->in, state->out)) 228 goto letscontinue; 229 230 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0) 231 goto letscontinue; 232 if (acpar.hotdrop) { 233 read_unlock_bh(&table->lock); 234 return NF_DROP; 235 } 236 237 ADD_COUNTER(*(counter_base + i), skb->len, 1); 238 239 /* these should only watch: not modify, nor tell us 240 * what to do with the packet 241 */ 242 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar); 243 244 t = ebt_get_target_c(point); 245 /* standard target */ 246 if (!t->u.target->target) 247 verdict = ((struct ebt_standard_target *)t)->verdict; 248 else { 249 acpar.target = t->u.target; 250 acpar.targinfo = t->data; 251 verdict = t->u.target->target(skb, &acpar); 252 } 253 if (verdict == EBT_ACCEPT) { 254 read_unlock_bh(&table->lock); 255 return NF_ACCEPT; 256 } 257 if (verdict == EBT_DROP) { 258 read_unlock_bh(&table->lock); 259 return NF_DROP; 260 } 261 if (verdict == EBT_RETURN) { 262letsreturn: 263 if (WARN(sp == 0, "RETURN on base chain")) { 264 /* act like this is EBT_CONTINUE */ 265 goto letscontinue; 266 } 267 268 sp--; 269 /* put all the local variables right */ 270 i = cs[sp].n; 271 chaininfo = cs[sp].chaininfo; 272 nentries = chaininfo->nentries; 273 point = cs[sp].e; 274 counter_base = cb_base + 275 chaininfo->counter_offset; 276 continue; 277 } 278 if (verdict == EBT_CONTINUE) 279 goto letscontinue; 280 281 if (WARN(verdict < 0, "bogus standard verdict\n")) { 282 read_unlock_bh(&table->lock); 283 return NF_DROP; 284 } 285 286 /* jump to a udc */ 287 cs[sp].n = i + 1; 288 cs[sp].chaininfo = chaininfo; 289 cs[sp].e = ebt_next_entry(point); 290 i = 0; 291 chaininfo = (struct ebt_entries *) (base + verdict); 292 293 if (WARN(chaininfo->distinguisher, "jump to non-chain\n")) { 294 read_unlock_bh(&table->lock); 295 return NF_DROP; 296 } 297 298 nentries = chaininfo->nentries; 299 point = (struct ebt_entry *)chaininfo->data; 300 counter_base = cb_base + chaininfo->counter_offset; 301 sp++; 302 continue; 303letscontinue: 304 point = ebt_next_entry(point); 305 i++; 306 } 307 308 /* I actually like this :) */ 309 if (chaininfo->policy == EBT_RETURN) 310 goto letsreturn; 311 if (chaininfo->policy == EBT_ACCEPT) { 312 read_unlock_bh(&table->lock); 313 return NF_ACCEPT; 314 } 315 read_unlock_bh(&table->lock); 316 return NF_DROP; 317} 318 319/* If it succeeds, returns element and locks mutex */ 320static inline void * 321find_inlist_lock_noload(struct net *net, const char *name, int *error, 322 struct mutex *mutex) 323{ 324 struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id); 325 struct ebt_template *tmpl; 326 struct ebt_table *table; 327 328 mutex_lock(mutex); 329 list_for_each_entry(table, &ebt_net->tables, list) { 330 if (strcmp(table->name, name) == 0) 331 return table; 332 } 333 334 
list_for_each_entry(tmpl, &template_tables, list) { 335 if (strcmp(name, tmpl->name) == 0) { 336 struct module *owner = tmpl->owner; 337 338 if (!try_module_get(owner)) 339 goto out; 340 341 mutex_unlock(mutex); 342 343 *error = tmpl->table_init(net); 344 if (*error) { 345 module_put(owner); 346 return NULL; 347 } 348 349 mutex_lock(mutex); 350 module_put(owner); 351 break; 352 } 353 } 354 355 list_for_each_entry(table, &ebt_net->tables, list) { 356 if (strcmp(table->name, name) == 0) 357 return table; 358 } 359 360out: 361 *error = -ENOENT; 362 mutex_unlock(mutex); 363 return NULL; 364} 365 366static void * 367find_inlist_lock(struct net *net, const char *name, const char *prefix, 368 int *error, struct mutex *mutex) 369{ 370 return try_then_request_module( 371 find_inlist_lock_noload(net, name, error, mutex), 372 "%s%s", prefix, name); 373} 374 375static inline struct ebt_table * 376find_table_lock(struct net *net, const char *name, int *error, 377 struct mutex *mutex) 378{ 379 return find_inlist_lock(net, name, "ebtable_", error, mutex); 380} 381 382static inline void ebt_free_table_info(struct ebt_table_info *info) 383{ 384 int i; 385 386 if (info->chainstack) { 387 for_each_possible_cpu(i) 388 vfree(info->chainstack[i]); 389 vfree(info->chainstack); 390 } 391} 392static inline int 393ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par, 394 unsigned int *cnt) 395{ 396 const struct ebt_entry *e = par->entryinfo; 397 struct xt_match *match; 398 size_t left = ((char *)e + e->watchers_offset) - (char *)m; 399 int ret; 400 401 if (left < sizeof(struct ebt_entry_match) || 402 left - sizeof(struct ebt_entry_match) < m->match_size) 403 return -EINVAL; 404 405 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision); 406 if (IS_ERR(match) || match->family != NFPROTO_BRIDGE) { 407 if (!IS_ERR(match)) 408 module_put(match->me); 409 request_module("ebt_%s", m->u.name); 410 match = xt_find_match(NFPROTO_BRIDGE, m->u.name, m->u.revision); 411 } 412 if (IS_ERR(match)) 413 return PTR_ERR(match); 414 m->u.match = match; 415 416 par->match = match; 417 par->matchinfo = m->data; 418 ret = xt_check_match(par, m->match_size, 419 ntohs(e->ethproto), e->invflags & EBT_IPROTO); 420 if (ret < 0) { 421 module_put(match->me); 422 return ret; 423 } 424 425 (*cnt)++; 426 return 0; 427} 428 429static inline int 430ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, 431 unsigned int *cnt) 432{ 433 const struct ebt_entry *e = par->entryinfo; 434 struct xt_target *watcher; 435 size_t left = ((char *)e + e->target_offset) - (char *)w; 436 int ret; 437 438 if (left < sizeof(struct ebt_entry_watcher) || 439 left - sizeof(struct ebt_entry_watcher) < w->watcher_size) 440 return -EINVAL; 441 442 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); 443 if (IS_ERR(watcher)) 444 return PTR_ERR(watcher); 445 446 if (watcher->family != NFPROTO_BRIDGE) { 447 module_put(watcher->me); 448 return -ENOENT; 449 } 450 451 w->u.watcher = watcher; 452 453 par->target = watcher; 454 par->targinfo = w->data; 455 ret = xt_check_target(par, w->watcher_size, 456 ntohs(e->ethproto), e->invflags & EBT_IPROTO); 457 if (ret < 0) { 458 module_put(watcher->me); 459 return ret; 460 } 461 462 (*cnt)++; 463 return 0; 464} 465 466static int ebt_verify_pointers(const struct ebt_replace *repl, 467 struct ebt_table_info *newinfo) 468{ 469 unsigned int limit = repl->entries_size; 470 unsigned int valid_hooks = repl->valid_hooks; 471 unsigned int offset = 0; 472 int i; 473 474 for (i = 0; i < 
NF_BR_NUMHOOKS; i++) 475 newinfo->hook_entry[i] = NULL; 476 477 newinfo->entries_size = repl->entries_size; 478 newinfo->nentries = repl->nentries; 479 480 while (offset < limit) { 481 size_t left = limit - offset; 482 struct ebt_entry *e = (void *)newinfo->entries + offset; 483 484 if (left < sizeof(unsigned int)) 485 break; 486 487 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 488 if ((valid_hooks & (1 << i)) == 0) 489 continue; 490 if ((char __user *)repl->hook_entry[i] == 491 repl->entries + offset) 492 break; 493 } 494 495 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) { 496 if (e->bitmask != 0) { 497 /* we make userspace set this right, 498 * so there is no misunderstanding 499 */ 500 return -EINVAL; 501 } 502 if (i != NF_BR_NUMHOOKS) 503 newinfo->hook_entry[i] = (struct ebt_entries *)e; 504 if (left < sizeof(struct ebt_entries)) 505 break; 506 offset += sizeof(struct ebt_entries); 507 } else { 508 if (left < sizeof(struct ebt_entry)) 509 break; 510 if (left < e->next_offset) 511 break; 512 if (e->next_offset < sizeof(struct ebt_entry)) 513 return -EINVAL; 514 offset += e->next_offset; 515 } 516 } 517 if (offset != limit) 518 return -EINVAL; 519 520 /* check if all valid hooks have a chain */ 521 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 522 if (!newinfo->hook_entry[i] && 523 (valid_hooks & (1 << i))) 524 return -EINVAL; 525 } 526 return 0; 527} 528 529/* this one is very careful, as it is the first function 530 * to parse the userspace data 531 */ 532static inline int 533ebt_check_entry_size_and_hooks(const struct ebt_entry *e, 534 const struct ebt_table_info *newinfo, 535 unsigned int *n, unsigned int *cnt, 536 unsigned int *totalcnt, unsigned int *udc_cnt) 537{ 538 int i; 539 540 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 541 if ((void *)e == (void *)newinfo->hook_entry[i]) 542 break; 543 } 544 /* beginning of a new chain 545 * if i == NF_BR_NUMHOOKS it must be a user defined chain 546 */ 547 if (i != NF_BR_NUMHOOKS || !e->bitmask) { 548 /* this checks if the previous chain has as many entries 549 * as it said it has 550 */ 551 if (*n != *cnt) 552 return -EINVAL; 553 554 if (((struct ebt_entries *)e)->policy != EBT_DROP && 555 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) { 556 /* only RETURN from udc */ 557 if (i != NF_BR_NUMHOOKS || 558 ((struct ebt_entries *)e)->policy != EBT_RETURN) 559 return -EINVAL; 560 } 561 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */ 562 (*udc_cnt)++; 563 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) 564 return -EINVAL; 565 *n = ((struct ebt_entries *)e)->nentries; 566 *cnt = 0; 567 return 0; 568 } 569 /* a plain old entry, heh */ 570 if (sizeof(struct ebt_entry) > e->watchers_offset || 571 e->watchers_offset > e->target_offset || 572 e->target_offset >= e->next_offset) 573 return -EINVAL; 574 575 /* this is not checked anywhere else */ 576 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) 577 return -EINVAL; 578 579 (*cnt)++; 580 (*totalcnt)++; 581 return 0; 582} 583 584struct ebt_cl_stack { 585 struct ebt_chainstack cs; 586 int from; 587 unsigned int hookmask; 588}; 589 590/* We need these positions to check that the jumps to a different part of the 591 * entries is a jump to the beginning of a new chain. 
592 */ 593static inline int 594ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo, 595 unsigned int *n, struct ebt_cl_stack *udc) 596{ 597 int i; 598 599 /* we're only interested in chain starts */ 600 if (e->bitmask) 601 return 0; 602 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 603 if (newinfo->hook_entry[i] == (struct ebt_entries *)e) 604 break; 605 } 606 /* only care about udc */ 607 if (i != NF_BR_NUMHOOKS) 608 return 0; 609 610 udc[*n].cs.chaininfo = (struct ebt_entries *)e; 611 /* these initialisations are depended on later in check_chainloops() */ 612 udc[*n].cs.n = 0; 613 udc[*n].hookmask = 0; 614 615 (*n)++; 616 return 0; 617} 618 619static inline int 620ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i) 621{ 622 struct xt_mtdtor_param par; 623 624 if (i && (*i)-- == 0) 625 return 1; 626 627 par.net = net; 628 par.match = m->u.match; 629 par.matchinfo = m->data; 630 par.family = NFPROTO_BRIDGE; 631 if (par.match->destroy != NULL) 632 par.match->destroy(&par); 633 module_put(par.match->me); 634 return 0; 635} 636 637static inline int 638ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i) 639{ 640 struct xt_tgdtor_param par; 641 642 if (i && (*i)-- == 0) 643 return 1; 644 645 par.net = net; 646 par.target = w->u.watcher; 647 par.targinfo = w->data; 648 par.family = NFPROTO_BRIDGE; 649 if (par.target->destroy != NULL) 650 par.target->destroy(&par); 651 module_put(par.target->me); 652 return 0; 653} 654 655static inline int 656ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt) 657{ 658 struct xt_tgdtor_param par; 659 struct ebt_entry_target *t; 660 661 if (e->bitmask == 0) 662 return 0; 663 /* we're done */ 664 if (cnt && (*cnt)-- == 0) 665 return 1; 666 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL); 667 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL); 668 t = ebt_get_target(e); 669 670 par.net = net; 671 par.target = t->u.target; 672 par.targinfo = t->data; 673 par.family = NFPROTO_BRIDGE; 674 if (par.target->destroy != NULL) 675 par.target->destroy(&par); 676 module_put(par.target->me); 677 return 0; 678} 679 680static inline int 681ebt_check_entry(struct ebt_entry *e, struct net *net, 682 const struct ebt_table_info *newinfo, 683 const char *name, unsigned int *cnt, 684 struct ebt_cl_stack *cl_s, unsigned int udc_cnt) 685{ 686 struct ebt_entry_target *t; 687 struct xt_target *target; 688 unsigned int i, j, hook = 0, hookmask = 0; 689 size_t gap; 690 int ret; 691 struct xt_mtchk_param mtpar; 692 struct xt_tgchk_param tgpar; 693 694 /* don't mess with the struct ebt_entries */ 695 if (e->bitmask == 0) 696 return 0; 697 698 if (e->bitmask & ~EBT_F_MASK) 699 return -EINVAL; 700 701 if (e->invflags & ~EBT_INV_MASK) 702 return -EINVAL; 703 704 if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) 705 return -EINVAL; 706 707 /* what hook do we belong to? 
*/ 708 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 709 if (!newinfo->hook_entry[i]) 710 continue; 711 if ((char *)newinfo->hook_entry[i] < (char *)e) 712 hook = i; 713 else 714 break; 715 } 716 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on 717 * a base chain 718 */ 719 if (i < NF_BR_NUMHOOKS) 720 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS); 721 else { 722 for (i = 0; i < udc_cnt; i++) 723 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e) 724 break; 725 if (i == 0) 726 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS); 727 else 728 hookmask = cl_s[i - 1].hookmask; 729 } 730 i = 0; 731 732 memset(&mtpar, 0, sizeof(mtpar)); 733 memset(&tgpar, 0, sizeof(tgpar)); 734 mtpar.net = tgpar.net = net; 735 mtpar.table = tgpar.table = name; 736 mtpar.entryinfo = tgpar.entryinfo = e; 737 mtpar.hook_mask = tgpar.hook_mask = hookmask; 738 mtpar.family = tgpar.family = NFPROTO_BRIDGE; 739 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i); 740 if (ret != 0) 741 goto cleanup_matches; 742 j = 0; 743 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j); 744 if (ret != 0) 745 goto cleanup_watchers; 746 t = ebt_get_target(e); 747 gap = e->next_offset - e->target_offset; 748 749 target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0); 750 if (IS_ERR(target)) { 751 ret = PTR_ERR(target); 752 goto cleanup_watchers; 753 } 754 755 /* Reject UNSPEC, xtables verdicts/return values are incompatible */ 756 if (target->family != NFPROTO_BRIDGE) { 757 module_put(target->me); 758 ret = -ENOENT; 759 goto cleanup_watchers; 760 } 761 762 t->u.target = target; 763 if (t->u.target == &ebt_standard_target) { 764 if (gap < sizeof(struct ebt_standard_target)) { 765 ret = -EFAULT; 766 goto cleanup_watchers; 767 } 768 if (((struct ebt_standard_target *)t)->verdict < 769 -NUM_STANDARD_TARGETS) { 770 ret = -EFAULT; 771 goto cleanup_watchers; 772 } 773 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) { 774 module_put(t->u.target->me); 775 ret = -EFAULT; 776 goto cleanup_watchers; 777 } 778 779 tgpar.target = target; 780 tgpar.targinfo = t->data; 781 ret = xt_check_target(&tgpar, t->target_size, 782 ntohs(e->ethproto), e->invflags & EBT_IPROTO); 783 if (ret < 0) { 784 module_put(target->me); 785 goto cleanup_watchers; 786 } 787 (*cnt)++; 788 return 0; 789cleanup_watchers: 790 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j); 791cleanup_matches: 792 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i); 793 return ret; 794} 795 796/* checks for loops and sets the hook mask for udc 797 * the hook mask for udc tells us from which base chains the udc can be 798 * accessed. 
This mask is a parameter to the check() functions of the extensions 799 */ 800static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s, 801 unsigned int udc_cnt, unsigned int hooknr, char *base) 802{ 803 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict; 804 const struct ebt_entry *e = (struct ebt_entry *)chain->data; 805 const struct ebt_entry_target *t; 806 807 while (pos < nentries || chain_nr != -1) { 808 /* end of udc, go back one 'recursion' step */ 809 if (pos == nentries) { 810 /* put back values of the time when this chain was called */ 811 e = cl_s[chain_nr].cs.e; 812 if (cl_s[chain_nr].from != -1) 813 nentries = 814 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries; 815 else 816 nentries = chain->nentries; 817 pos = cl_s[chain_nr].cs.n; 818 /* make sure we won't see a loop that isn't one */ 819 cl_s[chain_nr].cs.n = 0; 820 chain_nr = cl_s[chain_nr].from; 821 if (pos == nentries) 822 continue; 823 } 824 t = ebt_get_target_c(e); 825 if (strcmp(t->u.name, EBT_STANDARD_TARGET)) 826 goto letscontinue; 827 if (e->target_offset + sizeof(struct ebt_standard_target) > 828 e->next_offset) 829 return -1; 830 831 verdict = ((struct ebt_standard_target *)t)->verdict; 832 if (verdict >= 0) { /* jump to another chain */ 833 struct ebt_entries *hlp2 = 834 (struct ebt_entries *)(base + verdict); 835 for (i = 0; i < udc_cnt; i++) 836 if (hlp2 == cl_s[i].cs.chaininfo) 837 break; 838 /* bad destination or loop */ 839 if (i == udc_cnt) 840 return -1; 841 842 if (cl_s[i].cs.n) 843 return -1; 844 845 if (cl_s[i].hookmask & (1 << hooknr)) 846 goto letscontinue; 847 /* this can't be 0, so the loop test is correct */ 848 cl_s[i].cs.n = pos + 1; 849 pos = 0; 850 cl_s[i].cs.e = ebt_next_entry(e); 851 e = (struct ebt_entry *)(hlp2->data); 852 nentries = hlp2->nentries; 853 cl_s[i].from = chain_nr; 854 chain_nr = i; 855 /* this udc is accessible from the base chain for hooknr */ 856 cl_s[i].hookmask |= (1 << hooknr); 857 continue; 858 } 859letscontinue: 860 e = ebt_next_entry(e); 861 pos++; 862 } 863 return 0; 864} 865 866/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ 867static int translate_table(struct net *net, const char *name, 868 struct ebt_table_info *newinfo) 869{ 870 unsigned int i, j, k, udc_cnt; 871 int ret; 872 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */ 873 874 i = 0; 875 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i]) 876 i++; 877 if (i == NF_BR_NUMHOOKS) 878 return -EINVAL; 879 880 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) 881 return -EINVAL; 882 883 /* make sure chains are ordered after each other in same order 884 * as their corresponding hooks 885 */ 886 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) { 887 if (!newinfo->hook_entry[j]) 888 continue; 889 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) 890 return -EINVAL; 891 892 i = j; 893 } 894 895 /* do some early checkings and initialize some things */ 896 i = 0; /* holds the expected nr. of entries for the chain */ 897 j = 0; /* holds the up to now counted entries for the chain */ 898 k = 0; /* holds the total nr. of entries, should equal 899 * newinfo->nentries afterwards 900 */ 901 udc_cnt = 0; /* will hold the nr. 
of user defined chains (udc) */ 902 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 903 ebt_check_entry_size_and_hooks, newinfo, 904 &i, &j, &k, &udc_cnt); 905 906 if (ret != 0) 907 return ret; 908 909 if (i != j) 910 return -EINVAL; 911 912 if (k != newinfo->nentries) 913 return -EINVAL; 914 915 /* get the location of the udc, put them in an array 916 * while we're at it, allocate the chainstack 917 */ 918 if (udc_cnt) { 919 /* this will get free'd in do_replace()/ebt_register_table() 920 * if an error occurs 921 */ 922 newinfo->chainstack = 923 vmalloc(array_size(nr_cpu_ids, 924 sizeof(*(newinfo->chainstack)))); 925 if (!newinfo->chainstack) 926 return -ENOMEM; 927 for_each_possible_cpu(i) { 928 newinfo->chainstack[i] = 929 vmalloc_node(array_size(udc_cnt, 930 sizeof(*(newinfo->chainstack[0]))), 931 cpu_to_node(i)); 932 if (!newinfo->chainstack[i]) { 933 while (i) 934 vfree(newinfo->chainstack[--i]); 935 vfree(newinfo->chainstack); 936 newinfo->chainstack = NULL; 937 return -ENOMEM; 938 } 939 } 940 941 cl_s = vmalloc(array_size(udc_cnt, sizeof(*cl_s))); 942 if (!cl_s) 943 return -ENOMEM; 944 i = 0; /* the i'th udc */ 945 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 946 ebt_get_udc_positions, newinfo, &i, cl_s); 947 /* sanity check */ 948 if (i != udc_cnt) { 949 vfree(cl_s); 950 return -EFAULT; 951 } 952 } 953 954 /* Check for loops */ 955 for (i = 0; i < NF_BR_NUMHOOKS; i++) 956 if (newinfo->hook_entry[i]) 957 if (check_chainloops(newinfo->hook_entry[i], 958 cl_s, udc_cnt, i, newinfo->entries)) { 959 vfree(cl_s); 960 return -EINVAL; 961 } 962 963 /* we now know the following (along with E=mc²): 964 * - the nr of entries in each chain is right 965 * - the size of the allocated space is right 966 * - all valid hooks have a corresponding chain 967 * - there are no loops 968 * - wrong data can still be on the level of a single entry 969 * - could be there are jumps to places that are not the 970 * beginning of a chain. This can only occur in chains that 971 * are not accessible from any base chains, so we don't care. 
972 */ 973 974 /* used to know what we need to clean up if something goes wrong */ 975 i = 0; 976 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 977 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt); 978 if (ret != 0) { 979 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 980 ebt_cleanup_entry, net, &i); 981 } 982 vfree(cl_s); 983 return ret; 984} 985 986/* called under write_lock */ 987static void get_counters(const struct ebt_counter *oldcounters, 988 struct ebt_counter *counters, unsigned int nentries) 989{ 990 int i, cpu; 991 struct ebt_counter *counter_base; 992 993 /* counters of cpu 0 */ 994 memcpy(counters, oldcounters, 995 sizeof(struct ebt_counter) * nentries); 996 997 /* add other counters to those of cpu 0 */ 998 for_each_possible_cpu(cpu) { 999 if (cpu == 0) 1000 continue; 1001 counter_base = COUNTER_BASE(oldcounters, nentries, cpu); 1002 for (i = 0; i < nentries; i++) 1003 ADD_COUNTER(counters[i], counter_base[i].bcnt, 1004 counter_base[i].pcnt); 1005 } 1006} 1007 1008static int do_replace_finish(struct net *net, struct ebt_replace *repl, 1009 struct ebt_table_info *newinfo) 1010{ 1011 int ret; 1012 struct ebt_counter *counterstmp = NULL; 1013 /* used to be able to unlock earlier */ 1014 struct ebt_table_info *table; 1015 struct ebt_table *t; 1016 1017 /* the user wants counters back 1018 * the check on the size is done later, when we have the lock 1019 */ 1020 if (repl->num_counters) { 1021 unsigned long size = repl->num_counters * sizeof(*counterstmp); 1022 counterstmp = vmalloc(size); 1023 if (!counterstmp) 1024 return -ENOMEM; 1025 } 1026 1027 newinfo->chainstack = NULL; 1028 ret = ebt_verify_pointers(repl, newinfo); 1029 if (ret != 0) 1030 goto free_counterstmp; 1031 1032 ret = translate_table(net, repl->name, newinfo); 1033 1034 if (ret != 0) 1035 goto free_counterstmp; 1036 1037 t = find_table_lock(net, repl->name, &ret, &ebt_mutex); 1038 if (!t) { 1039 ret = -ENOENT; 1040 goto free_iterate; 1041 } 1042 1043 /* the table doesn't like it */ 1044 if (t->check && (ret = t->check(newinfo, repl->valid_hooks))) 1045 goto free_unlock; 1046 1047 if (repl->num_counters && repl->num_counters != t->private->nentries) { 1048 ret = -EINVAL; 1049 goto free_unlock; 1050 } 1051 1052 /* we have the mutex lock, so no danger in reading this pointer */ 1053 table = t->private; 1054 /* make sure the table can only be rmmod'ed if it contains no rules */ 1055 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) { 1056 ret = -ENOENT; 1057 goto free_unlock; 1058 } else if (table->nentries && !newinfo->nentries) 1059 module_put(t->me); 1060 /* we need an atomic snapshot of the counters */ 1061 write_lock_bh(&t->lock); 1062 if (repl->num_counters) 1063 get_counters(t->private->counters, counterstmp, 1064 t->private->nentries); 1065 1066 t->private = newinfo; 1067 write_unlock_bh(&t->lock); 1068 mutex_unlock(&ebt_mutex); 1069 /* so, a user can change the chains while having messed up her counter 1070 * allocation. Only reason why this is done is because this way the lock 1071 * is held only once, while this doesn't bring the kernel into a 1072 * dangerous state. 
1073 */ 1074 if (repl->num_counters && 1075 copy_to_user(repl->counters, counterstmp, 1076 array_size(repl->num_counters, sizeof(struct ebt_counter)))) { 1077 /* Silent error, can't fail, new table is already in place */ 1078 net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n"); 1079 } 1080 1081 /* decrease module count and free resources */ 1082 EBT_ENTRY_ITERATE(table->entries, table->entries_size, 1083 ebt_cleanup_entry, net, NULL); 1084 1085 vfree(table->entries); 1086 ebt_free_table_info(table); 1087 vfree(table); 1088 vfree(counterstmp); 1089 1090 audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries, 1091 AUDIT_XT_OP_REPLACE, GFP_KERNEL); 1092 return ret; 1093 1094free_unlock: 1095 mutex_unlock(&ebt_mutex); 1096free_iterate: 1097 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 1098 ebt_cleanup_entry, net, NULL); 1099free_counterstmp: 1100 vfree(counterstmp); 1101 /* can be initialized in translate_table() */ 1102 ebt_free_table_info(newinfo); 1103 return ret; 1104} 1105 1106/* replace the table */ 1107static int do_replace(struct net *net, sockptr_t arg, unsigned int len) 1108{ 1109 int ret, countersize; 1110 struct ebt_table_info *newinfo; 1111 struct ebt_replace tmp; 1112 1113 if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) 1114 return -EFAULT; 1115 1116 if (len != sizeof(tmp) + tmp.entries_size) 1117 return -EINVAL; 1118 1119 if (tmp.entries_size == 0) 1120 return -EINVAL; 1121 1122 /* overflow check */ 1123 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / 1124 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) 1125 return -ENOMEM; 1126 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) 1127 return -ENOMEM; 1128 1129 tmp.name[sizeof(tmp.name) - 1] = 0; 1130 1131 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; 1132 newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT); 1133 if (!newinfo) 1134 return -ENOMEM; 1135 1136 if (countersize) 1137 memset(newinfo->counters, 0, countersize); 1138 1139 newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT); 1140 if (!newinfo->entries) { 1141 ret = -ENOMEM; 1142 goto free_newinfo; 1143 } 1144 if (copy_from_user( 1145 newinfo->entries, tmp.entries, tmp.entries_size) != 0) { 1146 ret = -EFAULT; 1147 goto free_entries; 1148 } 1149 1150 ret = do_replace_finish(net, &tmp, newinfo); 1151 if (ret == 0) 1152 return ret; 1153free_entries: 1154 vfree(newinfo->entries); 1155free_newinfo: 1156 vfree(newinfo); 1157 return ret; 1158} 1159 1160static void __ebt_unregister_table(struct net *net, struct ebt_table *table) 1161{ 1162 mutex_lock(&ebt_mutex); 1163 list_del(&table->list); 1164 mutex_unlock(&ebt_mutex); 1165 audit_log_nfcfg(table->name, AF_BRIDGE, table->private->nentries, 1166 AUDIT_XT_OP_UNREGISTER, GFP_KERNEL); 1167 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, 1168 ebt_cleanup_entry, net, NULL); 1169 if (table->private->nentries) 1170 module_put(table->me); 1171 vfree(table->private->entries); 1172 ebt_free_table_info(table->private); 1173 vfree(table->private); 1174 kfree(table->ops); 1175 kfree(table); 1176} 1177 1178int ebt_register_table(struct net *net, const struct ebt_table *input_table, 1179 const struct nf_hook_ops *template_ops) 1180{ 1181 struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id); 1182 struct ebt_table_info *newinfo; 1183 struct ebt_table *t, *table; 1184 struct nf_hook_ops *ops; 1185 unsigned int num_ops; 1186 struct ebt_replace_kernel *repl; 1187 int ret, i, 
countersize; 1188 void *p; 1189 1190 if (input_table == NULL || (repl = input_table->table) == NULL || 1191 repl->entries == NULL || repl->entries_size == 0 || 1192 repl->counters != NULL || input_table->private != NULL) 1193 return -EINVAL; 1194 1195 /* Don't add one table to multiple lists. */ 1196 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL); 1197 if (!table) { 1198 ret = -ENOMEM; 1199 goto out; 1200 } 1201 1202 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids; 1203 newinfo = vmalloc(sizeof(*newinfo) + countersize); 1204 ret = -ENOMEM; 1205 if (!newinfo) 1206 goto free_table; 1207 1208 p = vmalloc(repl->entries_size); 1209 if (!p) 1210 goto free_newinfo; 1211 1212 memcpy(p, repl->entries, repl->entries_size); 1213 newinfo->entries = p; 1214 1215 newinfo->entries_size = repl->entries_size; 1216 newinfo->nentries = repl->nentries; 1217 1218 if (countersize) 1219 memset(newinfo->counters, 0, countersize); 1220 1221 /* fill in newinfo and parse the entries */ 1222 newinfo->chainstack = NULL; 1223 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 1224 if ((repl->valid_hooks & (1 << i)) == 0) 1225 newinfo->hook_entry[i] = NULL; 1226 else 1227 newinfo->hook_entry[i] = p + 1228 ((char *)repl->hook_entry[i] - repl->entries); 1229 } 1230 ret = translate_table(net, repl->name, newinfo); 1231 if (ret != 0) 1232 goto free_chainstack; 1233 1234 if (table->check && table->check(newinfo, table->valid_hooks)) { 1235 ret = -EINVAL; 1236 goto free_chainstack; 1237 } 1238 1239 table->private = newinfo; 1240 rwlock_init(&table->lock); 1241 mutex_lock(&ebt_mutex); 1242 list_for_each_entry(t, &ebt_net->tables, list) { 1243 if (strcmp(t->name, table->name) == 0) { 1244 ret = -EEXIST; 1245 goto free_unlock; 1246 } 1247 } 1248 1249 /* Hold a reference count if the chains aren't empty */ 1250 if (newinfo->nentries && !try_module_get(table->me)) { 1251 ret = -ENOENT; 1252 goto free_unlock; 1253 } 1254 1255 num_ops = hweight32(table->valid_hooks); 1256 if (num_ops == 0) { 1257 ret = -EINVAL; 1258 goto free_unlock; 1259 } 1260 1261 ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL); 1262 if (!ops) { 1263 ret = -ENOMEM; 1264 if (newinfo->nentries) 1265 module_put(table->me); 1266 goto free_unlock; 1267 } 1268 1269 for (i = 0; i < num_ops; i++) 1270 ops[i].priv = table; 1271 1272 list_add(&table->list, &ebt_net->tables); 1273 mutex_unlock(&ebt_mutex); 1274 1275 table->ops = ops; 1276 ret = nf_register_net_hooks(net, ops, num_ops); 1277 if (ret) 1278 __ebt_unregister_table(net, table); 1279 1280 audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries, 1281 AUDIT_XT_OP_REGISTER, GFP_KERNEL); 1282 return ret; 1283free_unlock: 1284 mutex_unlock(&ebt_mutex); 1285free_chainstack: 1286 ebt_free_table_info(newinfo); 1287 vfree(newinfo->entries); 1288free_newinfo: 1289 vfree(newinfo); 1290free_table: 1291 kfree(table); 1292out: 1293 return ret; 1294} 1295 1296int ebt_register_template(const struct ebt_table *t, int (*table_init)(struct net *net)) 1297{ 1298 struct ebt_template *tmpl; 1299 1300 mutex_lock(&ebt_mutex); 1301 list_for_each_entry(tmpl, &template_tables, list) { 1302 if (WARN_ON_ONCE(strcmp(t->name, tmpl->name) == 0)) { 1303 mutex_unlock(&ebt_mutex); 1304 return -EEXIST; 1305 } 1306 } 1307 1308 tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); 1309 if (!tmpl) { 1310 mutex_unlock(&ebt_mutex); 1311 return -ENOMEM; 1312 } 1313 1314 tmpl->table_init = table_init; 1315 strscpy(tmpl->name, t->name, sizeof(tmpl->name)); 1316 tmpl->owner = t->me; 1317 list_add(&tmpl->list, &template_tables); 1318 
1319 mutex_unlock(&ebt_mutex); 1320 return 0; 1321} 1322EXPORT_SYMBOL(ebt_register_template); 1323 1324void ebt_unregister_template(const struct ebt_table *t) 1325{ 1326 struct ebt_template *tmpl; 1327 1328 mutex_lock(&ebt_mutex); 1329 list_for_each_entry(tmpl, &template_tables, list) { 1330 if (strcmp(t->name, tmpl->name)) 1331 continue; 1332 1333 list_del(&tmpl->list); 1334 mutex_unlock(&ebt_mutex); 1335 kfree(tmpl); 1336 return; 1337 } 1338 1339 mutex_unlock(&ebt_mutex); 1340 WARN_ON_ONCE(1); 1341} 1342EXPORT_SYMBOL(ebt_unregister_template); 1343 1344static struct ebt_table *__ebt_find_table(struct net *net, const char *name) 1345{ 1346 struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id); 1347 struct ebt_table *t; 1348 1349 mutex_lock(&ebt_mutex); 1350 1351 list_for_each_entry(t, &ebt_net->tables, list) { 1352 if (strcmp(t->name, name) == 0) { 1353 mutex_unlock(&ebt_mutex); 1354 return t; 1355 } 1356 } 1357 1358 mutex_unlock(&ebt_mutex); 1359 return NULL; 1360} 1361 1362void ebt_unregister_table_pre_exit(struct net *net, const char *name) 1363{ 1364 struct ebt_table *table = __ebt_find_table(net, name); 1365 1366 if (table) 1367 nf_unregister_net_hooks(net, table->ops, hweight32(table->valid_hooks)); 1368} 1369EXPORT_SYMBOL(ebt_unregister_table_pre_exit); 1370 1371void ebt_unregister_table(struct net *net, const char *name) 1372{ 1373 struct ebt_table *table = __ebt_find_table(net, name); 1374 1375 if (table) 1376 __ebt_unregister_table(net, table); 1377} 1378 1379/* userspace just supplied us with counters */ 1380static int do_update_counters(struct net *net, const char *name, 1381 struct ebt_counter __user *counters, 1382 unsigned int num_counters, unsigned int len) 1383{ 1384 int i, ret; 1385 struct ebt_counter *tmp; 1386 struct ebt_table *t; 1387 1388 if (num_counters == 0) 1389 return -EINVAL; 1390 1391 tmp = vmalloc(array_size(num_counters, sizeof(*tmp))); 1392 if (!tmp) 1393 return -ENOMEM; 1394 1395 t = find_table_lock(net, name, &ret, &ebt_mutex); 1396 if (!t) 1397 goto free_tmp; 1398 1399 if (num_counters != t->private->nentries) { 1400 ret = -EINVAL; 1401 goto unlock_mutex; 1402 } 1403 1404 if (copy_from_user(tmp, counters, 1405 array_size(num_counters, sizeof(*counters)))) { 1406 ret = -EFAULT; 1407 goto unlock_mutex; 1408 } 1409 1410 /* we want an atomic add of the counters */ 1411 write_lock_bh(&t->lock); 1412 1413 /* we add to the counters of the first cpu */ 1414 for (i = 0; i < num_counters; i++) 1415 ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt); 1416 1417 write_unlock_bh(&t->lock); 1418 ret = 0; 1419unlock_mutex: 1420 mutex_unlock(&ebt_mutex); 1421free_tmp: 1422 vfree(tmp); 1423 return ret; 1424} 1425 1426static int update_counters(struct net *net, sockptr_t arg, unsigned int len) 1427{ 1428 struct ebt_replace hlp; 1429 1430 if (copy_from_sockptr(&hlp, arg, sizeof(hlp))) 1431 return -EFAULT; 1432 1433 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) 1434 return -EINVAL; 1435 1436 return do_update_counters(net, hlp.name, hlp.counters, 1437 hlp.num_counters, len); 1438} 1439 1440static inline int ebt_obj_to_user(char __user *um, const char *_name, 1441 const char *data, int entrysize, 1442 int usersize, int datasize, u8 revision) 1443{ 1444 char name[EBT_EXTENSION_MAXNAMELEN] = {0}; 1445 1446 /* ebtables expects 31 bytes long names but xt_match names are 29 bytes 1447 * long. Copy 29 bytes and fill remaining bytes with zeroes. 
1448 */ 1449 strlcpy(name, _name, sizeof(name)); 1450 if (copy_to_user(um, name, EBT_EXTENSION_MAXNAMELEN) || 1451 put_user(revision, (u8 __user *)(um + EBT_EXTENSION_MAXNAMELEN)) || 1452 put_user(datasize, (int __user *)(um + EBT_EXTENSION_MAXNAMELEN + 1)) || 1453 xt_data_to_user(um + entrysize, data, usersize, datasize, 1454 XT_ALIGN(datasize))) 1455 return -EFAULT; 1456 1457 return 0; 1458} 1459 1460static inline int ebt_match_to_user(const struct ebt_entry_match *m, 1461 const char *base, char __user *ubase) 1462{ 1463 return ebt_obj_to_user(ubase + ((char *)m - base), 1464 m->u.match->name, m->data, sizeof(*m), 1465 m->u.match->usersize, m->match_size, 1466 m->u.match->revision); 1467} 1468 1469static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w, 1470 const char *base, char __user *ubase) 1471{ 1472 return ebt_obj_to_user(ubase + ((char *)w - base), 1473 w->u.watcher->name, w->data, sizeof(*w), 1474 w->u.watcher->usersize, w->watcher_size, 1475 w->u.watcher->revision); 1476} 1477 1478static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base, 1479 char __user *ubase) 1480{ 1481 int ret; 1482 char __user *hlp; 1483 const struct ebt_entry_target *t; 1484 1485 if (e->bitmask == 0) { 1486 /* special case !EBT_ENTRY_OR_ENTRIES */ 1487 if (copy_to_user(ubase + ((char *)e - base), e, 1488 sizeof(struct ebt_entries))) 1489 return -EFAULT; 1490 return 0; 1491 } 1492 1493 if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e))) 1494 return -EFAULT; 1495 1496 hlp = ubase + (((char *)e + e->target_offset) - base); 1497 t = ebt_get_target_c(e); 1498 1499 ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase); 1500 if (ret != 0) 1501 return ret; 1502 ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase); 1503 if (ret != 0) 1504 return ret; 1505 ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t), 1506 t->u.target->usersize, t->target_size, 1507 t->u.target->revision); 1508 if (ret != 0) 1509 return ret; 1510 1511 return 0; 1512} 1513 1514static int copy_counters_to_user(struct ebt_table *t, 1515 const struct ebt_counter *oldcounters, 1516 void __user *user, unsigned int num_counters, 1517 unsigned int nentries) 1518{ 1519 struct ebt_counter *counterstmp; 1520 int ret = 0; 1521 1522 /* userspace might not need the counters */ 1523 if (num_counters == 0) 1524 return 0; 1525 1526 if (num_counters != nentries) 1527 return -EINVAL; 1528 1529 counterstmp = vmalloc(array_size(nentries, sizeof(*counterstmp))); 1530 if (!counterstmp) 1531 return -ENOMEM; 1532 1533 write_lock_bh(&t->lock); 1534 get_counters(oldcounters, counterstmp, nentries); 1535 write_unlock_bh(&t->lock); 1536 1537 if (copy_to_user(user, counterstmp, 1538 array_size(nentries, sizeof(struct ebt_counter)))) 1539 ret = -EFAULT; 1540 vfree(counterstmp); 1541 return ret; 1542} 1543 1544/* called with ebt_mutex locked */ 1545static int copy_everything_to_user(struct ebt_table *t, void __user *user, 1546 const int *len, int cmd) 1547{ 1548 struct ebt_replace tmp; 1549 const struct ebt_counter *oldcounters; 1550 unsigned int entries_size, nentries; 1551 int ret; 1552 char *entries; 1553 1554 if (cmd == EBT_SO_GET_ENTRIES) { 1555 entries_size = t->private->entries_size; 1556 nentries = t->private->nentries; 1557 entries = t->private->entries; 1558 oldcounters = t->private->counters; 1559 } else { 1560 entries_size = t->table->entries_size; 1561 nentries = t->table->nentries; 1562 entries = t->table->entries; 1563 oldcounters = t->table->counters; 1564 } 1565 1566 if 
(copy_from_user(&tmp, user, sizeof(tmp))) 1567 return -EFAULT; 1568 1569 if (*len != sizeof(struct ebt_replace) + entries_size + 1570 (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0)) 1571 return -EINVAL; 1572 1573 if (tmp.nentries != nentries) 1574 return -EINVAL; 1575 1576 if (tmp.entries_size != entries_size) 1577 return -EINVAL; 1578 1579 ret = copy_counters_to_user(t, oldcounters, tmp.counters, 1580 tmp.num_counters, nentries); 1581 if (ret) 1582 return ret; 1583 1584 /* set the match/watcher/target names right */ 1585 return EBT_ENTRY_ITERATE(entries, entries_size, 1586 ebt_entry_to_user, entries, tmp.entries); 1587} 1588 1589#ifdef CONFIG_NETFILTER_XTABLES_COMPAT 1590/* 32 bit-userspace compatibility definitions. */ 1591struct compat_ebt_replace { 1592 char name[EBT_TABLE_MAXNAMELEN]; 1593 compat_uint_t valid_hooks; 1594 compat_uint_t nentries; 1595 compat_uint_t entries_size; 1596 /* start of the chains */ 1597 compat_uptr_t hook_entry[NF_BR_NUMHOOKS]; 1598 /* nr of counters userspace expects back */ 1599 compat_uint_t num_counters; 1600 /* where the kernel will put the old counters. */ 1601 compat_uptr_t counters; 1602 compat_uptr_t entries; 1603}; 1604 1605/* struct ebt_entry_match, _target and _watcher have same layout */ 1606struct compat_ebt_entry_mwt { 1607 union { 1608 struct { 1609 char name[EBT_EXTENSION_MAXNAMELEN]; 1610 u8 revision; 1611 }; 1612 compat_uptr_t ptr; 1613 } u; 1614 compat_uint_t match_size; 1615 compat_uint_t data[] __aligned(__alignof__(struct compat_ebt_replace)); 1616}; 1617 1618/* account for possible padding between match_size and ->data */ 1619static int ebt_compat_entry_padsize(void) 1620{ 1621 BUILD_BUG_ON(sizeof(struct ebt_entry_match) < 1622 sizeof(struct compat_ebt_entry_mwt)); 1623 return (int) sizeof(struct ebt_entry_match) - 1624 sizeof(struct compat_ebt_entry_mwt); 1625} 1626 1627static int ebt_compat_match_offset(const struct xt_match *match, 1628 unsigned int userlen) 1629{ 1630 /* ebt_among needs special handling. The kernel .matchsize is 1631 * set to -1 at registration time; at runtime an EBT_ALIGN()ed 1632 * value is expected. 1633 * Example: userspace sends 4500, ebt_among.c wants 4504. 
1634 */ 1635 if (unlikely(match->matchsize == -1)) 1636 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen); 1637 return xt_compat_match_offset(match); 1638} 1639 1640static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr, 1641 unsigned int *size) 1642{ 1643 const struct xt_match *match = m->u.match; 1644 struct compat_ebt_entry_mwt __user *cm = *dstptr; 1645 int off = ebt_compat_match_offset(match, m->match_size); 1646 compat_uint_t msize = m->match_size - off; 1647 1648 if (WARN_ON(off >= m->match_size)) 1649 return -EINVAL; 1650 1651 if (copy_to_user(cm->u.name, match->name, strlen(match->name) + 1) || 1652 put_user(match->revision, &cm->u.revision) || 1653 put_user(msize, &cm->match_size)) 1654 return -EFAULT; 1655 1656 if (match->compat_to_user) { 1657 if (match->compat_to_user(cm->data, m->data)) 1658 return -EFAULT; 1659 } else { 1660 if (xt_data_to_user(cm->data, m->data, match->usersize, msize, 1661 COMPAT_XT_ALIGN(msize))) 1662 return -EFAULT; 1663 } 1664 1665 *size -= ebt_compat_entry_padsize() + off; 1666 *dstptr = cm->data; 1667 *dstptr += msize; 1668 return 0; 1669} 1670 1671static int compat_target_to_user(struct ebt_entry_target *t, 1672 void __user **dstptr, 1673 unsigned int *size) 1674{ 1675 const struct xt_target *target = t->u.target; 1676 struct compat_ebt_entry_mwt __user *cm = *dstptr; 1677 int off = xt_compat_target_offset(target); 1678 compat_uint_t tsize = t->target_size - off; 1679 1680 if (WARN_ON(off >= t->target_size)) 1681 return -EINVAL; 1682 1683 if (copy_to_user(cm->u.name, target->name, strlen(target->name) + 1) || 1684 put_user(target->revision, &cm->u.revision) || 1685 put_user(tsize, &cm->match_size)) 1686 return -EFAULT; 1687 1688 if (target->compat_to_user) { 1689 if (target->compat_to_user(cm->data, t->data)) 1690 return -EFAULT; 1691 } else { 1692 if (xt_data_to_user(cm->data, t->data, target->usersize, tsize, 1693 COMPAT_XT_ALIGN(tsize))) 1694 return -EFAULT; 1695 } 1696 1697 *size -= ebt_compat_entry_padsize() + off; 1698 *dstptr = cm->data; 1699 *dstptr += tsize; 1700 return 0; 1701} 1702 1703static int compat_watcher_to_user(struct ebt_entry_watcher *w, 1704 void __user **dstptr, 1705 unsigned int *size) 1706{ 1707 return compat_target_to_user((struct ebt_entry_target *)w, 1708 dstptr, size); 1709} 1710 1711static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr, 1712 unsigned int *size) 1713{ 1714 struct ebt_entry_target *t; 1715 struct ebt_entry __user *ce; 1716 u32 watchers_offset, target_offset, next_offset; 1717 compat_uint_t origsize; 1718 int ret; 1719 1720 if (e->bitmask == 0) { 1721 if (*size < sizeof(struct ebt_entries)) 1722 return -EINVAL; 1723 if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries))) 1724 return -EFAULT; 1725 1726 *dstptr += sizeof(struct ebt_entries); 1727 *size -= sizeof(struct ebt_entries); 1728 return 0; 1729 } 1730 1731 if (*size < sizeof(*ce)) 1732 return -EINVAL; 1733 1734 ce = *dstptr; 1735 if (copy_to_user(ce, e, sizeof(*ce))) 1736 return -EFAULT; 1737 1738 origsize = *size; 1739 *dstptr += sizeof(*ce); 1740 1741 ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size); 1742 if (ret) 1743 return ret; 1744 watchers_offset = e->watchers_offset - (origsize - *size); 1745 1746 ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size); 1747 if (ret) 1748 return ret; 1749 target_offset = e->target_offset - (origsize - *size); 1750 1751 t = ebt_get_target(e); 1752 1753 ret = compat_target_to_user(t, dstptr, size); 1754 if (ret) 1755 return ret; 1756 
next_offset = e->next_offset - (origsize - *size); 1757 1758 if (put_user(watchers_offset, &ce->watchers_offset) || 1759 put_user(target_offset, &ce->target_offset) || 1760 put_user(next_offset, &ce->next_offset)) 1761 return -EFAULT; 1762 1763 *size -= sizeof(*ce); 1764 return 0; 1765} 1766 1767static int compat_calc_match(struct ebt_entry_match *m, int *off) 1768{ 1769 *off += ebt_compat_match_offset(m->u.match, m->match_size); 1770 *off += ebt_compat_entry_padsize(); 1771 return 0; 1772} 1773 1774static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off) 1775{ 1776 *off += xt_compat_target_offset(w->u.watcher); 1777 *off += ebt_compat_entry_padsize(); 1778 return 0; 1779} 1780 1781static int compat_calc_entry(const struct ebt_entry *e, 1782 const struct ebt_table_info *info, 1783 const void *base, 1784 struct compat_ebt_replace *newinfo) 1785{ 1786 const struct ebt_entry_target *t; 1787 unsigned int entry_offset; 1788 int off, ret, i; 1789 1790 if (e->bitmask == 0) 1791 return 0; 1792 1793 off = 0; 1794 entry_offset = (void *)e - base; 1795 1796 EBT_MATCH_ITERATE(e, compat_calc_match, &off); 1797 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off); 1798 1799 t = ebt_get_target_c(e); 1800 1801 off += xt_compat_target_offset(t->u.target); 1802 off += ebt_compat_entry_padsize(); 1803 1804 newinfo->entries_size -= off; 1805 1806 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off); 1807 if (ret) 1808 return ret; 1809 1810 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 1811 const void *hookptr = info->hook_entry[i]; 1812 if (info->hook_entry[i] && 1813 (e < (struct ebt_entry *)(base - hookptr))) { 1814 newinfo->hook_entry[i] -= off; 1815 pr_debug("0x%08X -> 0x%08X\n", 1816 newinfo->hook_entry[i] + off, 1817 newinfo->hook_entry[i]); 1818 } 1819 } 1820 1821 return 0; 1822} 1823 1824static int ebt_compat_init_offsets(unsigned int number) 1825{ 1826 if (number > INT_MAX) 1827 return -EINVAL; 1828 1829 /* also count the base chain policies */ 1830 number += NF_BR_NUMHOOKS; 1831 1832 return xt_compat_init_offsets(NFPROTO_BRIDGE, number); 1833} 1834 1835static int compat_table_info(const struct ebt_table_info *info, 1836 struct compat_ebt_replace *newinfo) 1837{ 1838 unsigned int size = info->entries_size; 1839 const void *entries = info->entries; 1840 int ret; 1841 1842 newinfo->entries_size = size; 1843 ret = ebt_compat_init_offsets(info->nentries); 1844 if (ret) 1845 return ret; 1846 1847 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, 1848 entries, newinfo); 1849} 1850 1851static int compat_copy_everything_to_user(struct ebt_table *t, 1852 void __user *user, int *len, int cmd) 1853{ 1854 struct compat_ebt_replace repl, tmp; 1855 struct ebt_counter *oldcounters; 1856 struct ebt_table_info tinfo; 1857 int ret; 1858 void __user *pos; 1859 1860 memset(&tinfo, 0, sizeof(tinfo)); 1861 1862 if (cmd == EBT_SO_GET_ENTRIES) { 1863 tinfo.entries_size = t->private->entries_size; 1864 tinfo.nentries = t->private->nentries; 1865 tinfo.entries = t->private->entries; 1866 oldcounters = t->private->counters; 1867 } else { 1868 tinfo.entries_size = t->table->entries_size; 1869 tinfo.nentries = t->table->nentries; 1870 tinfo.entries = t->table->entries; 1871 oldcounters = t->table->counters; 1872 } 1873 1874 if (copy_from_user(&tmp, user, sizeof(tmp))) 1875 return -EFAULT; 1876 1877 if (tmp.nentries != tinfo.nentries || 1878 (tmp.num_counters && tmp.num_counters != tinfo.nentries)) 1879 return -EINVAL; 1880 1881 memcpy(&repl, &tmp, sizeof(repl)); 1882 if (cmd == EBT_SO_GET_ENTRIES) 1883 
ret = compat_table_info(t->private, &repl); 1884 else 1885 ret = compat_table_info(&tinfo, &repl); 1886 if (ret) 1887 return ret; 1888 1889 if (*len != sizeof(tmp) + repl.entries_size + 1890 (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) { 1891 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n", 1892 *len, tinfo.entries_size, repl.entries_size); 1893 return -EINVAL; 1894 } 1895 1896 /* userspace might not need the counters */ 1897 ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters), 1898 tmp.num_counters, tinfo.nentries); 1899 if (ret) 1900 return ret; 1901 1902 pos = compat_ptr(tmp.entries); 1903 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size, 1904 compat_copy_entry_to_user, &pos, &tmp.entries_size); 1905} 1906 1907struct ebt_entries_buf_state { 1908 char *buf_kern_start; /* kernel buffer to copy (translated) data to */ 1909 u32 buf_kern_len; /* total size of kernel buffer */ 1910 u32 buf_kern_offset; /* amount of data copied so far */ 1911 u32 buf_user_offset; /* read position in userspace buffer */ 1912}; 1913 1914static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz) 1915{ 1916 state->buf_kern_offset += sz; 1917 return state->buf_kern_offset >= sz ? 0 : -EINVAL; 1918} 1919 1920static int ebt_buf_add(struct ebt_entries_buf_state *state, 1921 const void *data, unsigned int sz) 1922{ 1923 if (state->buf_kern_start == NULL) 1924 goto count_only; 1925 1926 if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len)) 1927 return -EINVAL; 1928 1929 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); 1930 1931 count_only: 1932 state->buf_user_offset += sz; 1933 return ebt_buf_count(state, sz); 1934} 1935 1936static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz) 1937{ 1938 char *b = state->buf_kern_start; 1939 1940 if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len)) 1941 return -EINVAL; 1942 1943 if (b != NULL && sz > 0) 1944 memset(b + state->buf_kern_offset, 0, sz); 1945 /* do not adjust ->buf_user_offset here, we added kernel-side padding */ 1946 return ebt_buf_count(state, sz); 1947} 1948 1949enum compat_mwt { 1950 EBT_COMPAT_MATCH, 1951 EBT_COMPAT_WATCHER, 1952 EBT_COMPAT_TARGET, 1953}; 1954 1955static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt, 1956 enum compat_mwt compat_mwt, 1957 struct ebt_entries_buf_state *state, 1958 const unsigned char *base) 1959{ 1960 char name[EBT_EXTENSION_MAXNAMELEN]; 1961 struct xt_match *match; 1962 struct xt_target *wt; 1963 void *dst = NULL; 1964 int off, pad = 0; 1965 unsigned int size_kern, match_size = mwt->match_size; 1966 1967 if (strscpy(name, mwt->u.name, sizeof(name)) < 0) 1968 return -EINVAL; 1969 1970 if (state->buf_kern_start) 1971 dst = state->buf_kern_start + state->buf_kern_offset; 1972 1973 switch (compat_mwt) { 1974 case EBT_COMPAT_MATCH: 1975 match = xt_request_find_match(NFPROTO_BRIDGE, name, 1976 mwt->u.revision); 1977 if (IS_ERR(match)) 1978 return PTR_ERR(match); 1979 1980 off = ebt_compat_match_offset(match, match_size); 1981 if (dst) { 1982 if (match->compat_from_user) 1983 match->compat_from_user(dst, mwt->data); 1984 else 1985 memcpy(dst, mwt->data, match_size); 1986 } 1987 1988 size_kern = match->matchsize; 1989 if (unlikely(size_kern == -1)) 1990 size_kern = match_size; 1991 module_put(match->me); 1992 break; 1993 case EBT_COMPAT_WATCHER: 1994 case EBT_COMPAT_TARGET: 1995 wt = xt_request_find_target(NFPROTO_BRIDGE, name, 1996 mwt->u.revision); 1997 if (IS_ERR(wt)) 1998 
return PTR_ERR(wt); 1999 off = xt_compat_target_offset(wt); 2000 2001 if (dst) { 2002 if (wt->compat_from_user) 2003 wt->compat_from_user(dst, mwt->data); 2004 else 2005 memcpy(dst, mwt->data, match_size); 2006 } 2007 2008 size_kern = wt->targetsize; 2009 module_put(wt->me); 2010 break; 2011 2012 default: 2013 return -EINVAL; 2014 } 2015 2016 state->buf_kern_offset += match_size + off; 2017 state->buf_user_offset += match_size; 2018 pad = XT_ALIGN(size_kern) - size_kern; 2019 2020 if (pad > 0 && dst) { 2021 if (WARN_ON(state->buf_kern_len <= pad)) 2022 return -EINVAL; 2023 if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad)) 2024 return -EINVAL; 2025 memset(dst + size_kern, 0, pad); 2026 } 2027 return off + match_size; 2028} 2029 2030/* return size of all matches, watchers or target, including necessary 2031 * alignment and padding. 2032 */ 2033static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32, 2034 unsigned int size_left, enum compat_mwt type, 2035 struct ebt_entries_buf_state *state, const void *base) 2036{ 2037 const char *buf = (const char *)match32; 2038 int growth = 0; 2039 2040 if (size_left == 0) 2041 return 0; 2042 2043 do { 2044 struct ebt_entry_match *match_kern; 2045 int ret; 2046 2047 if (size_left < sizeof(*match32)) 2048 return -EINVAL; 2049 2050 match_kern = (struct ebt_entry_match *) state->buf_kern_start; 2051 if (match_kern) { 2052 char *tmp; 2053 tmp = state->buf_kern_start + state->buf_kern_offset; 2054 match_kern = (struct ebt_entry_match *) tmp; 2055 } 2056 ret = ebt_buf_add(state, buf, sizeof(*match32)); 2057 if (ret < 0) 2058 return ret; 2059 size_left -= sizeof(*match32); 2060 2061 /* add padding before match->data (if any) */ 2062 ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize()); 2063 if (ret < 0) 2064 return ret; 2065 2066 if (match32->match_size > size_left) 2067 return -EINVAL; 2068 2069 size_left -= match32->match_size; 2070 2071 ret = compat_mtw_from_user(match32, type, state, base); 2072 if (ret < 0) 2073 return ret; 2074 2075 if (WARN_ON(ret < match32->match_size)) 2076 return -EINVAL; 2077 growth += ret - match32->match_size; 2078 growth += ebt_compat_entry_padsize(); 2079 2080 buf += sizeof(*match32); 2081 buf += match32->match_size; 2082 2083 if (match_kern) 2084 match_kern->match_size = ret; 2085 2086 match32 = (struct compat_ebt_entry_mwt *) buf; 2087 } while (size_left); 2088 2089 return growth; 2090} 2091 2092/* called for all ebt_entry structures. */ 2093static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base, 2094 unsigned int *total, 2095 struct ebt_entries_buf_state *state) 2096{ 2097 unsigned int i, j, startoff, next_expected_off, new_offset = 0; 2098 /* stores match/watchers/targets & offset of next struct ebt_entry: */ 2099 unsigned int offsets[4]; 2100 unsigned int *offsets_update = NULL; 2101 int ret; 2102 char *buf_start; 2103 2104 if (*total < sizeof(struct ebt_entries)) 2105 return -EINVAL; 2106 2107 if (!entry->bitmask) { 2108 *total -= sizeof(struct ebt_entries); 2109 return ebt_buf_add(state, entry, sizeof(struct ebt_entries)); 2110 } 2111 if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry)) 2112 return -EINVAL; 2113 2114 startoff = state->buf_user_offset; 2115 /* pull in most part of ebt_entry, it does not need to be changed. 
*/ 2116 ret = ebt_buf_add(state, entry, 2117 offsetof(struct ebt_entry, watchers_offset)); 2118 if (ret < 0) 2119 return ret; 2120 2121 offsets[0] = sizeof(struct ebt_entry); /* matches come first */ 2122 memcpy(&offsets[1], &entry->watchers_offset, 2123 sizeof(offsets) - sizeof(offsets[0])); 2124 2125 if (state->buf_kern_start) { 2126 buf_start = state->buf_kern_start + state->buf_kern_offset; 2127 offsets_update = (unsigned int *) buf_start; 2128 } 2129 ret = ebt_buf_add(state, &offsets[1], 2130 sizeof(offsets) - sizeof(offsets[0])); 2131 if (ret < 0) 2132 return ret; 2133 buf_start = (char *) entry; 2134 /* 0: matches offset, always follows ebt_entry. 2135 * 1: watchers offset, from ebt_entry structure 2136 * 2: target offset, from ebt_entry structure 2137 * 3: next ebt_entry offset, from ebt_entry structure 2138 * 2139 * offsets are relative to beginning of struct ebt_entry (i.e., 0). 2140 */ 2141 for (i = 0; i < 4 ; ++i) { 2142 if (offsets[i] > *total) 2143 return -EINVAL; 2144 2145 if (i < 3 && offsets[i] == *total) 2146 return -EINVAL; 2147 2148 if (i == 0) 2149 continue; 2150 if (offsets[i-1] > offsets[i]) 2151 return -EINVAL; 2152 } 2153 2154 for (i = 0, j = 1 ; j < 4 ; j++, i++) { 2155 struct compat_ebt_entry_mwt *match32; 2156 unsigned int size; 2157 char *buf = buf_start + offsets[i]; 2158 2159 if (offsets[i] > offsets[j]) 2160 return -EINVAL; 2161 2162 match32 = (struct compat_ebt_entry_mwt *) buf; 2163 size = offsets[j] - offsets[i]; 2164 ret = ebt_size_mwt(match32, size, i, state, base); 2165 if (ret < 0) 2166 return ret; 2167 new_offset += ret; 2168 if (offsets_update && new_offset) { 2169 pr_debug("change offset %d to %d\n", 2170 offsets_update[i], offsets[j] + new_offset); 2171 offsets_update[i] = offsets[j] + new_offset; 2172 } 2173 } 2174 2175 if (state->buf_kern_start == NULL) { 2176 unsigned int offset = buf_start - (char *) base; 2177 2178 ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset); 2179 if (ret < 0) 2180 return ret; 2181 } 2182 2183 next_expected_off = state->buf_user_offset - startoff; 2184 if (next_expected_off != entry->next_offset) 2185 return -EINVAL; 2186 2187 if (*total < entry->next_offset) 2188 return -EINVAL; 2189 *total -= entry->next_offset; 2190 return 0; 2191} 2192 2193/* repl->entries_size is the size of the ebt_entry blob in userspace. 2194 * It might need more memory when copied to a 64 bit kernel in case 2195 * userspace is 32-bit. So, first task: find out how much memory is needed. 2196 * 2197 * Called before validation is performed. 
2198 */ 2199static int compat_copy_entries(unsigned char *data, unsigned int size_user, 2200 struct ebt_entries_buf_state *state) 2201{ 2202 unsigned int size_remaining = size_user; 2203 int ret; 2204 2205 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data, 2206 &size_remaining, state); 2207 if (ret < 0) 2208 return ret; 2209 2210 if (size_remaining) 2211 return -EINVAL; 2212 2213 return state->buf_kern_offset; 2214} 2215 2216 2217static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, 2218 sockptr_t arg, unsigned int len) 2219{ 2220 struct compat_ebt_replace tmp; 2221 int i; 2222 2223 if (len < sizeof(tmp)) 2224 return -EINVAL; 2225 2226 if (copy_from_sockptr(&tmp, arg, sizeof(tmp))) 2227 return -EFAULT; 2228 2229 if (len != sizeof(tmp) + tmp.entries_size) 2230 return -EINVAL; 2231 2232 if (tmp.entries_size == 0) 2233 return -EINVAL; 2234 2235 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / 2236 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter)) 2237 return -ENOMEM; 2238 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) 2239 return -ENOMEM; 2240 2241 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry)); 2242 2243 /* starting with hook_entry, 32 vs. 64 bit structures are different */ 2244 for (i = 0; i < NF_BR_NUMHOOKS; i++) 2245 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]); 2246 2247 repl->num_counters = tmp.num_counters; 2248 repl->counters = compat_ptr(tmp.counters); 2249 repl->entries = compat_ptr(tmp.entries); 2250 return 0; 2251} 2252 2253static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) 2254{ 2255 int ret, i, countersize, size64; 2256 struct ebt_table_info *newinfo; 2257 struct ebt_replace tmp; 2258 struct ebt_entries_buf_state state; 2259 void *entries_tmp; 2260 2261 ret = compat_copy_ebt_replace_from_user(&tmp, arg, len); 2262 if (ret) { 2263 /* try real handler in case userland supplied needed padding */ 2264 if (ret == -EINVAL && do_replace(net, arg, len) == 0) 2265 ret = 0; 2266 return ret; 2267 } 2268 2269 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; 2270 newinfo = vmalloc(sizeof(*newinfo) + countersize); 2271 if (!newinfo) 2272 return -ENOMEM; 2273 2274 if (countersize) 2275 memset(newinfo->counters, 0, countersize); 2276 2277 memset(&state, 0, sizeof(state)); 2278 2279 newinfo->entries = vmalloc(tmp.entries_size); 2280 if (!newinfo->entries) { 2281 ret = -ENOMEM; 2282 goto free_newinfo; 2283 } 2284 if (copy_from_user( 2285 newinfo->entries, tmp.entries, tmp.entries_size) != 0) { 2286 ret = -EFAULT; 2287 goto free_entries; 2288 } 2289 2290 entries_tmp = newinfo->entries; 2291 2292 xt_compat_lock(NFPROTO_BRIDGE); 2293 2294 ret = ebt_compat_init_offsets(tmp.nentries); 2295 if (ret < 0) 2296 goto out_unlock; 2297 2298 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2299 if (ret < 0) 2300 goto out_unlock; 2301 2302 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n", 2303 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset, 2304 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size)); 2305 2306 size64 = ret; 2307 newinfo->entries = vmalloc(size64); 2308 if (!newinfo->entries) { 2309 vfree(entries_tmp); 2310 ret = -ENOMEM; 2311 goto out_unlock; 2312 } 2313 2314 memset(&state, 0, sizeof(state)); 2315 state.buf_kern_start = newinfo->entries; 2316 state.buf_kern_len = size64; 2317 2318 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2319 if (WARN_ON(ret < 0)) { 2320 vfree(entries_tmp); 2321 goto out_unlock; 
2322 } 2323 2324 vfree(entries_tmp); 2325 tmp.entries_size = size64; 2326 2327 for (i = 0; i < NF_BR_NUMHOOKS; i++) { 2328 char __user *usrptr; 2329 if (tmp.hook_entry[i]) { 2330 unsigned int delta; 2331 usrptr = (char __user *) tmp.hook_entry[i]; 2332 delta = usrptr - tmp.entries; 2333 usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta); 2334 tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr; 2335 } 2336 } 2337 2338 xt_compat_flush_offsets(NFPROTO_BRIDGE); 2339 xt_compat_unlock(NFPROTO_BRIDGE); 2340 2341 ret = do_replace_finish(net, &tmp, newinfo); 2342 if (ret == 0) 2343 return ret; 2344free_entries: 2345 vfree(newinfo->entries); 2346free_newinfo: 2347 vfree(newinfo); 2348 return ret; 2349out_unlock: 2350 xt_compat_flush_offsets(NFPROTO_BRIDGE); 2351 xt_compat_unlock(NFPROTO_BRIDGE); 2352 goto free_entries; 2353} 2354 2355static int compat_update_counters(struct net *net, sockptr_t arg, 2356 unsigned int len) 2357{ 2358 struct compat_ebt_replace hlp; 2359 2360 if (copy_from_sockptr(&hlp, arg, sizeof(hlp))) 2361 return -EFAULT; 2362 2363 /* try real handler in case userland supplied needed padding */ 2364 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) 2365 return update_counters(net, arg, len); 2366 2367 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters), 2368 hlp.num_counters, len); 2369} 2370 2371static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, 2372 void __user *user, int *len) 2373{ 2374 int ret; 2375 struct compat_ebt_replace tmp; 2376 struct ebt_table *t; 2377 struct net *net = sock_net(sk); 2378 2379 if ((cmd == EBT_SO_GET_INFO || cmd == EBT_SO_GET_INIT_INFO) && 2380 *len != sizeof(struct compat_ebt_replace)) 2381 return -EINVAL; 2382 2383 if (copy_from_user(&tmp, user, sizeof(tmp))) 2384 return -EFAULT; 2385 2386 tmp.name[sizeof(tmp.name) - 1] = '\0'; 2387 2388 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); 2389 if (!t) 2390 return ret; 2391 2392 xt_compat_lock(NFPROTO_BRIDGE); 2393 switch (cmd) { 2394 case EBT_SO_GET_INFO: 2395 tmp.nentries = t->private->nentries; 2396 ret = compat_table_info(t->private, &tmp); 2397 if (ret) 2398 goto out; 2399 tmp.valid_hooks = t->valid_hooks; 2400 2401 if (copy_to_user(user, &tmp, *len) != 0) { 2402 ret = -EFAULT; 2403 break; 2404 } 2405 ret = 0; 2406 break; 2407 case EBT_SO_GET_INIT_INFO: 2408 tmp.nentries = t->table->nentries; 2409 tmp.entries_size = t->table->entries_size; 2410 tmp.valid_hooks = t->table->valid_hooks; 2411 2412 if (copy_to_user(user, &tmp, *len) != 0) { 2413 ret = -EFAULT; 2414 break; 2415 } 2416 ret = 0; 2417 break; 2418 case EBT_SO_GET_ENTRIES: 2419 case EBT_SO_GET_INIT_ENTRIES: 2420 /* try real handler first in case of userland-side padding. 2421 * in case we are dealing with an 'ordinary' 32 bit binary 2422 * without 64bit compatibility padding, this will fail right 2423 * after copy_from_user when the *len argument is validated. 2424 * 2425 * the compat_ variant needs to do one pass over the kernel 2426 * data set to adjust for size differences before it the check. 
2427 */ 2428 if (copy_everything_to_user(t, user, len, cmd) == 0) 2429 ret = 0; 2430 else 2431 ret = compat_copy_everything_to_user(t, user, len, cmd); 2432 break; 2433 default: 2434 ret = -EINVAL; 2435 } 2436 out: 2437 xt_compat_flush_offsets(NFPROTO_BRIDGE); 2438 xt_compat_unlock(NFPROTO_BRIDGE); 2439 mutex_unlock(&ebt_mutex); 2440 return ret; 2441} 2442#endif 2443 2444static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) 2445{ 2446 struct net *net = sock_net(sk); 2447 struct ebt_replace tmp; 2448 struct ebt_table *t; 2449 int ret; 2450 2451 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2452 return -EPERM; 2453 2454#ifdef CONFIG_NETFILTER_XTABLES_COMPAT 2455 /* try real handler in case userland supplied needed padding */ 2456 if (in_compat_syscall() && 2457 ((cmd != EBT_SO_GET_INFO && cmd != EBT_SO_GET_INIT_INFO) || 2458 *len != sizeof(tmp))) 2459 return compat_do_ebt_get_ctl(sk, cmd, user, len); 2460#endif 2461 2462 if (copy_from_user(&tmp, user, sizeof(tmp))) 2463 return -EFAULT; 2464 2465 tmp.name[sizeof(tmp.name) - 1] = '\0'; 2466 2467 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); 2468 if (!t) 2469 return ret; 2470 2471 switch (cmd) { 2472 case EBT_SO_GET_INFO: 2473 case EBT_SO_GET_INIT_INFO: 2474 if (*len != sizeof(struct ebt_replace)) { 2475 ret = -EINVAL; 2476 mutex_unlock(&ebt_mutex); 2477 break; 2478 } 2479 if (cmd == EBT_SO_GET_INFO) { 2480 tmp.nentries = t->private->nentries; 2481 tmp.entries_size = t->private->entries_size; 2482 tmp.valid_hooks = t->valid_hooks; 2483 } else { 2484 tmp.nentries = t->table->nentries; 2485 tmp.entries_size = t->table->entries_size; 2486 tmp.valid_hooks = t->table->valid_hooks; 2487 } 2488 mutex_unlock(&ebt_mutex); 2489 if (copy_to_user(user, &tmp, *len) != 0) { 2490 ret = -EFAULT; 2491 break; 2492 } 2493 ret = 0; 2494 break; 2495 2496 case EBT_SO_GET_ENTRIES: 2497 case EBT_SO_GET_INIT_ENTRIES: 2498 ret = copy_everything_to_user(t, user, len, cmd); 2499 mutex_unlock(&ebt_mutex); 2500 break; 2501 2502 default: 2503 mutex_unlock(&ebt_mutex); 2504 ret = -EINVAL; 2505 } 2506 2507 return ret; 2508} 2509 2510static int do_ebt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, 2511 unsigned int len) 2512{ 2513 struct net *net = sock_net(sk); 2514 int ret; 2515 2516 if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) 2517 return -EPERM; 2518 2519 switch (cmd) { 2520 case EBT_SO_SET_ENTRIES: 2521#ifdef CONFIG_NETFILTER_XTABLES_COMPAT 2522 if (in_compat_syscall()) 2523 ret = compat_do_replace(net, arg, len); 2524 else 2525#endif 2526 ret = do_replace(net, arg, len); 2527 break; 2528 case EBT_SO_SET_COUNTERS: 2529#ifdef CONFIG_NETFILTER_XTABLES_COMPAT 2530 if (in_compat_syscall()) 2531 ret = compat_update_counters(net, arg, len); 2532 else 2533#endif 2534 ret = update_counters(net, arg, len); 2535 break; 2536 default: 2537 ret = -EINVAL; 2538 } 2539 return ret; 2540} 2541 2542static struct nf_sockopt_ops ebt_sockopts = { 2543 .pf = PF_INET, 2544 .set_optmin = EBT_BASE_CTL, 2545 .set_optmax = EBT_SO_SET_MAX + 1, 2546 .set = do_ebt_set_ctl, 2547 .get_optmin = EBT_BASE_CTL, 2548 .get_optmax = EBT_SO_GET_MAX + 1, 2549 .get = do_ebt_get_ctl, 2550 .owner = THIS_MODULE, 2551}; 2552 2553static int __net_init ebt_pernet_init(struct net *net) 2554{ 2555 struct ebt_pernet *ebt_net = net_generic(net, ebt_pernet_id); 2556 2557 INIT_LIST_HEAD(&ebt_net->tables); 2558 return 0; 2559} 2560 2561static struct pernet_operations ebt_net_ops = { 2562 .init = ebt_pernet_init, 2563 .id = &ebt_pernet_id, 2564 .size = sizeof(struct ebt_pernet), 2565}; 2566 
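/* Editor's note, not part of the original source: do_ebt_get_ctl() and
 * do_ebt_set_ctl() above are reached through the netfilter sockopt layer
 * registered as ebt_sockopts (pf = PF_INET, option numbers between
 * EBT_BASE_CTL and EBT_SO_GET_MAX / EBT_SO_SET_MAX).  A minimal userspace
 * sketch of the EBT_SO_GET_INFO query handled above might look like the
 * snippet below; the table name "filter", the helper name ebt_get_info()
 * and the use of an ordinary IPv4 UDP socket are assumptions made purely
 * for illustration.  CAP_NET_ADMIN is required, and *len must equal
 * sizeof(struct ebt_replace), as checked in do_ebt_get_ctl().
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/netfilter_bridge/ebtables.h>
 *
 *	static int ebt_get_info(struct ebt_replace *repl)
 *	{
 *		socklen_t len = sizeof(*repl);
 *		int err, fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(repl, 0, sizeof(*repl));
 *		strncpy(repl->name, "filter", sizeof(repl->name) - 1);
 *		err = getsockopt(fd, IPPROTO_IP, EBT_SO_GET_INFO, repl, &len);
 *		close(fd);
 *		return err;
 *	}
 *
 * On success repl->nentries, repl->entries_size and repl->valid_hooks
 * describe the current "filter" table, mirroring the EBT_SO_GET_INFO
 * branch of do_ebt_get_ctl() above.
 */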
2567static int __init ebtables_init(void) 2568{ 2569 int ret; 2570 2571 ret = xt_register_target(&ebt_standard_target); 2572 if (ret < 0) 2573 return ret; 2574 ret = nf_register_sockopt(&ebt_sockopts); 2575 if (ret < 0) { 2576 xt_unregister_target(&ebt_standard_target); 2577 return ret; 2578 } 2579 2580 ret = register_pernet_subsys(&ebt_net_ops); 2581 if (ret < 0) { 2582 nf_unregister_sockopt(&ebt_sockopts); 2583 xt_unregister_target(&ebt_standard_target); 2584 return ret; 2585 } 2586 2587 return 0; 2588} 2589 2590static void ebtables_fini(void) 2591{ 2592 nf_unregister_sockopt(&ebt_sockopts); 2593 xt_unregister_target(&ebt_standard_target); 2594 unregister_pernet_subsys(&ebt_net_ops); 2595} 2596 2597EXPORT_SYMBOL(ebt_register_table); 2598EXPORT_SYMBOL(ebt_unregister_table); 2599EXPORT_SYMBOL(ebt_do_table); 2600module_init(ebtables_init); 2601module_exit(ebtables_fini); 2602MODULE_LICENSE("GPL");
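/* Editor's note, not part of the original source: a brief summary of the
 * CONFIG_NETFILTER_XTABLES_COMPAT replace path above, for readers tracing
 * a 32-bit ebtables binary talking to a 64-bit kernel.
 *
 * compat_do_replace() translates the 32-bit ruleset in two passes over the
 * same user-supplied blob, both driven by struct ebt_entries_buf_state:
 *
 *  1. compat_copy_entries() is first called with a zeroed state
 *     (buf_kern_start == NULL), so the walk only measures: per entry it
 *     records the 32-to-64-bit growth via xt_compat_add_offset(), and its
 *     return value (buf_kern_offset) is the size the native representation
 *     will need.
 *  2. After vmalloc()ing that size, the same walk runs again with
 *     buf_kern_start/buf_kern_len set; this time compat_mtw_from_user()
 *     actually copies and pads each match, watcher and target, and
 *     size_entry_mwt() rewrites the four per-entry offsets in the kernel
 *     buffer.
 *
 * Per extension, ebt_size_mwt() accounts the growth as the difference
 * between the size returned by compat_mtw_from_user() and the 32-bit
 * match_size, plus ebt_compat_entry_padsize() of padding in front of the
 * extension data.  Finally the hook_entry[] pointers are shifted with
 * xt_compat_calc_jump() so they match the translated offsets, and the
 * result is handed to do_replace_finish() like a native EBT_SO_SET_ENTRIES
 * request.
 */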