arp_tables.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Packet matching code for ARP packets.
 *
 * Based heavily, if not almost entirely, upon ip_tables.c framework.
 *
 * Some ARP specific bits are:
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2006-2009 Patrick McHardy <kaber@trash.net>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/capability.h>
#include <linux/if_arp.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <net/compat.h>
#include <net/sock.h>
#include <linux/uaccess.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include "../../netfilter/xt_repldata.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
MODULE_DESCRIPTION("arptables core");

void *arpt_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(arpt, ARPT);
}
EXPORT_SYMBOL_GPL(arpt_alloc_initial_table);

static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
				      const char *hdr_addr, int len)
{
	int i, ret;

	if (len > ARPT_DEV_ADDR_LEN_MAX)
		len = ARPT_DEV_ADDR_LEN_MAX;

	ret = 0;
	for (i = 0; i < len; i++)
		ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];

	return ret != 0;
}

/*
 * Unfortunately, _b and _mask are not aligned to an int (or long int).
 * Some arches don't care; unrolling the loop is a win on them.
 * For other arches, we only have a 16-bit alignment.
 */
static unsigned long ifname_compare(const char *_a, const char *_b, const char *_mask)
{
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	unsigned long ret = ifname_compare_aligned(_a, _b, _mask);
#else
	unsigned long ret = 0;
	const u16 *a = (const u16 *)_a;
	const u16 *b = (const u16 *)_b;
	const u16 *mask = (const u16 *)_mask;
	int i;

	for (i = 0; i < IFNAMSIZ/sizeof(u16); i++)
		ret |= (a[i] ^ b[i]) & mask[i];
#endif
	return ret;
}
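
/* The ARP payload following struct arphdr is laid out as: sender hw
 * address (ar_hln bytes), sender IP (4 bytes), target hw address
 * (ar_hln bytes), target IP (4 bytes).  arp_packet_match() walks this
 * layout with arpptr; the IP addresses are memcpy'd because the
 * payload carries no alignment guarantees.
 */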

/* Returns whether packet matches rule or not. */
static inline int arp_packet_match(const struct arphdr *arphdr,
				   struct net_device *dev,
				   const char *indev,
				   const char *outdev,
				   const struct arpt_arp *arpinfo)
{
	const char *arpptr = (char *)(arphdr + 1);
	const char *src_devaddr, *tgt_devaddr;
	__be32 src_ipaddr, tgt_ipaddr;
	long ret;

	if (NF_INVF(arpinfo, ARPT_INV_ARPOP,
		    (arphdr->ar_op & arpinfo->arpop_mask) != arpinfo->arpop))
		return 0;

	if (NF_INVF(arpinfo, ARPT_INV_ARPHRD,
		    (arphdr->ar_hrd & arpinfo->arhrd_mask) != arpinfo->arhrd))
		return 0;

	if (NF_INVF(arpinfo, ARPT_INV_ARPPRO,
		    (arphdr->ar_pro & arpinfo->arpro_mask) != arpinfo->arpro))
		return 0;

	if (NF_INVF(arpinfo, ARPT_INV_ARPHLN,
		    (arphdr->ar_hln & arpinfo->arhln_mask) != arpinfo->arhln))
		return 0;

	src_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&src_ipaddr, arpptr, sizeof(u32));
	arpptr += sizeof(u32);
	tgt_devaddr = arpptr;
	arpptr += dev->addr_len;
	memcpy(&tgt_ipaddr, arpptr, sizeof(u32));

	if (NF_INVF(arpinfo, ARPT_INV_SRCDEVADDR,
		    arp_devaddr_compare(&arpinfo->src_devaddr, src_devaddr,
					dev->addr_len)) ||
	    NF_INVF(arpinfo, ARPT_INV_TGTDEVADDR,
		    arp_devaddr_compare(&arpinfo->tgt_devaddr, tgt_devaddr,
					dev->addr_len)))
		return 0;

	if (NF_INVF(arpinfo, ARPT_INV_SRCIP,
		    (src_ipaddr & arpinfo->smsk.s_addr) != arpinfo->src.s_addr) ||
	    NF_INVF(arpinfo, ARPT_INV_TGTIP,
		    (tgt_ipaddr & arpinfo->tmsk.s_addr) != arpinfo->tgt.s_addr))
		return 0;

	/* Look for ifname matches. */
	ret = ifname_compare(indev, arpinfo->iniface, arpinfo->iniface_mask);

	if (NF_INVF(arpinfo, ARPT_INV_VIA_IN, ret != 0))
		return 0;

	ret = ifname_compare(outdev, arpinfo->outiface, arpinfo->outiface_mask);

	if (NF_INVF(arpinfo, ARPT_INV_VIA_OUT, ret != 0))
		return 0;

	return 1;
}

static inline int arp_checkentry(const struct arpt_arp *arp)
{
	if (arp->flags & ~ARPT_F_MASK)
		return 0;
	if (arp->invflags & ~ARPT_INV_MASK)
		return 0;

	return 1;
}

static unsigned int
arpt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_err_ratelimited("arp_tables: error: '%s'\n",
			    (const char *)par->targinfo);

	return NF_DROP;
}

static inline const struct xt_entry_target *
arpt_get_target_c(const struct arpt_entry *e)
{
	return arpt_get_target((struct arpt_entry *)e);
}

static inline struct arpt_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct arpt_entry *)(base + offset);
}

static inline
struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
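
/* Rule traversal below uses the xtables verdict encoding: a standard
 * target's verdict v >= 0 is a jump to the entry at offset v,
 * v == XT_RETURN pops the jump stack, and any other v < 0 is an
 * absolute verdict decoded as (unsigned int)(-v) - 1.  For example,
 * NF_ACCEPT (1) is stored as -NF_ACCEPT - 1, i.e. -2.
 */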

unsigned int arpt_do_table(void *priv,
			   struct sk_buff *skb,
			   const struct nf_hook_state *state)
{
	const struct xt_table *table = priv;
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	unsigned int verdict = NF_DROP;
	const struct arphdr *arp;
	struct arpt_entry *e, **jumpstack;
	const char *indev, *outdev;
	const void *table_base;
	unsigned int cpu, stackidx = 0;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return NF_DROP;

	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = READ_ONCE(table->private); /* Address dependency. */
	cpu = smp_processor_id();
	table_base = private->entries;
	jumpstack = (struct arpt_entry **)private->jumpstack[cpu];

	/* No TEE support for arptables, so no need to switch to alternate
	 * stack.  All targets that reenter must return absolute verdicts.
	 */
	e = get_entry(table_base, private->hook_entry[hook]);

	acpar.state = state;
	acpar.hotdrop = false;

	arp = arp_hdr(skb);
	do {
		const struct xt_entry_target *t;
		struct xt_counters *counter;

		if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
			e = arpt_next_entry(e);
			continue;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1);

		t = arpt_get_target_c(e);

		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0) {
					e = get_entry(table_base,
						      private->underflow[hook]);
				} else {
					e = jumpstack[--stackidx];
					e = arpt_next_entry(e);
				}
				continue;
			}
			if (table_base + v
			    != arpt_next_entry(e)) {
				if (unlikely(stackidx >= private->stacksize)) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[stackidx++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target = t->u.kernel.target;
		acpar.targinfo = t->data;
		verdict = t->u.kernel.target->target(skb, &acpar);

		if (verdict == XT_CONTINUE) {
			/* Target might have changed stuff. */
			arp = arp_hdr(skb);
			e = arpt_next_entry(e);
		} else {
			/* Verdict */
			break;
		}
	} while (!acpar.hotdrop);
	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else
		return verdict;
}

/* All zeroes == unconditional rule. */
static inline bool unconditional(const struct arpt_entry *e)
{
	static const struct arpt_arp uncond;

	return e->target_offset == sizeof(struct arpt_entry) &&
	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
}
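
/* While mark_source_chains() walks the rule graph, e->counters.pcnt is
 * borrowed as a back pointer (and reset to 0 on the way out), and the
 * (1 << NF_ARP_NUMHOOKS) bit in comefrom marks entries on the path
 * currently being walked, so meeting that bit again means the chain
 * loops back on itself.
 */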

/* Figures out from what hook each rule can be called: returns 0 if
 * there are loops.  Puts hook bitmask in comefrom.
 */
static int mark_source_chains(const struct xt_table_info *newinfo,
			      unsigned int valid_hooks, void *entry0,
			      unsigned int *offsets)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	 * to 0 as we leave), and comefrom to save source hook bitmask.
	 */
	for (hook = 0; hook < NF_ARP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct arpt_entry *e = entry0 + pos;

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)arpt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			if (e->comefrom & (1 << NF_ARP_NUMHOOKS))
				return 0;

			e->comefrom
				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Return: backtrack through the last
				 * big jump.
				 */
				do {
					e->comefrom ^= (1<<NF_ARP_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = entry0 + pos;
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = entry0 + pos + size;
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* This is a jump; chase it. */
					if (!xt_find_jump_offset(offsets, newpos,
								 newinfo->number))
						return 0;
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = entry0 + newpos;
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:		;
	}
	return 1;
}

static int check_target(struct arpt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = arpt_get_target(e);
	struct xt_tgchk_param par = {
		.net	= net,
		.table	= name,
		.entryinfo = e,
		.target	= t->u.kernel.target,
		.targinfo = t->data,
		.hook_mask = e->comefrom,
		.family	= NFPROTO_ARP,
	};

	return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false);
}

static int
find_check_entry(struct arpt_entry *e, struct net *net, const char *name,
		 unsigned int size,
		 struct xt_percpu_counter_alloc_state *alloc_state)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;

	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
		return -ENOMEM;

	t = arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto out;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
err:
	module_put(t->u.kernel.target->me);
out:
	xt_percpu_counter_free(&e->counters);

	return ret;
}

static bool check_underflow(const struct arpt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;
	t = arpt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
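
/* A candidate entry is only trusted after the bounds checks below:
 * correct alignment, lying entirely inside the user-supplied blob, and
 * a next_offset large enough for the entry plus at least one target.
 */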

static inline int check_entry_size_and_hooks(struct arpt_entry *e,
					     struct xt_table_info *newinfo,
					     const unsigned char *base,
					     const unsigned char *limit,
					     const unsigned int *hook_entries,
					     const unsigned int *underflows,
					     unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset
	    < sizeof(struct arpt_entry) + sizeof(struct xt_entry_target))
		return -EINVAL;

	if (!arp_checkentry(&e->arp))
		return -EINVAL;

	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
				     e->next_offset);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e))
				return -EINVAL;

			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}

static void cleanup_entry(struct arpt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;

	t = arpt_get_target(e);
	par.net = net;
	par.target = t->u.kernel.target;
	par.targinfo = t->data;
	par.family = NFPROTO_ARP;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(&e->counters);
}
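
/* Table replacement proceeds in passes: first every entry's size and
 * hook/underflow offsets are validated, then mark_source_chains()
 * rejects rule graphs with loops (-ELOOP), and only then are targets
 * looked up and checked.  A failure in the last pass unwinds exactly
 * the entries that were already set up.
 */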

/* Checks and translates the user-supplied table segment (held in
 * newinfo).
 */
static int translate_table(struct net *net,
			   struct xt_table_info *newinfo,
			   void *entry0,
			   const struct arpt_replace *repl)
{
	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
	struct arpt_entry *iter;
	unsigned int *offsets;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	offsets = xt_alloc_entry_offsets(newinfo->number);
	if (!offsets)
		return -ENOMEM;
	i = 0;

	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			goto out_free;
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		++i;
		if (strcmp(arpt_get_target(iter)->u.user.name,
			   XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	ret = -EINVAL;
	if (i != repl->num_entries)
		goto out_free;

	ret = xt_check_table_hooks(newinfo, repl->valid_hooks);
	if (ret)
		goto out_free;

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
		ret = -ELOOP;
		goto out_free;
	}
	kvfree(offsets);

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size,
				       &alloc_state);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
 out_free:
	kvfree(offsets);
	return ret;
}

static void get_counters(const struct xt_table_info *t,
			 struct xt_counters counters[])
{
	struct arpt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
			cond_resched();
		}
	}
}

static void get_old_counters(const struct xt_table_info *t,
			     struct xt_counters counters[])
{
	struct arpt_entry *iter;
	unsigned int cpu, i;

	for_each_possible_cpu(cpu) {
		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
			++i;
		}
		cond_resched();
	}
}

static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;

	/* We need an atomic snapshot of the counters: the rest doesn't
	 * change (other than comefrom, which userspace doesn't care
	 * about).
	 */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vzalloc(countersize);

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, counters);

	return counters;
}
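
/* Counters live in per-cpu storage while a table is active, but the
 * userspace ABI expects them inline in each entry, so copy-out takes a
 * coherent snapshot via alloc_counters() and patches it into each
 * entry's counters field on the way out.
 */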

static int copy_entries_to_user(unsigned int total_size,
				const struct xt_table *table,
				void __user *userptr)
{
	unsigned int off, num;
	const struct arpt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++) {
		const struct xt_entry_target *t;

		e = loc_cpu_entry + off;
		if (copy_to_user(userptr + off, e, sizeof(*e))) {
			ret = -EFAULT;
			goto free_counters;
		}
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		t = arpt_get_target_c(e);
		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
static void compat_standard_from_user(void *dst, const void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += xt_compat_calc_jump(NFPROTO_ARP, v);
	memcpy(dst, &v, sizeof(v));
}

static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= xt_compat_calc_jump(NFPROTO_ARP, cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}

static int compat_calc_entry(const struct arpt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - base;

	t = arpt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct arpt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct arpt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}

static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct arpt_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we don't care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	ret = xt_compat_init_offsets(NFPROTO_ARP, info->number);
	if (ret)
		return ret;
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
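
/* A 32-bit arptables binary on a 64-bit kernel sees entries that are
 * smaller than the native ones, so get_info() below reports the table
 * through compat_table_info() in that case, which recomputes size,
 * hook_entry and underflow offsets for the compat layout.
 */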

static int get_info(struct net *net, void __user *user, const int *len)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct arpt_getinfo))
		return -EINVAL;

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
	if (in_compat_syscall())
		xt_compat_lock(NFPROTO_ARP);
#endif
	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
	if (!IS_ERR(t)) {
		struct arpt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
		struct xt_table_info tmp;

		if (in_compat_syscall()) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(NFPROTO_ARP);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;
		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = PTR_ERR(t);
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
	if (in_compat_syscall())
		xt_compat_unlock(NFPROTO_ARP);
#endif
	return ret;
}

static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
		       const int *len)
{
	int ret;
	struct arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct arpt_get_entries) + get.size)
		return -EINVAL;

	get.name[sizeof(get.name) - 1] = '\0';

	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;

		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else
			ret = -EAGAIN;

		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	return ret;
}
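
/* Module reference accounting for the replace below, roughly: the
 * table's owner holds an extra reference while the table contains
 * user-defined rules (number > initial_entries), and
 * xt_request_find_table_lock() took one more to pin the table here.
 * The two conditional module_put() calls rebalance those references
 * against the old and new rule counts.
 */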

static int __do_replace(struct net *net, const char *name,
			unsigned int valid_hooks,
			struct xt_table_info *newinfo,
			unsigned int num_counters,
			void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;
	struct arpt_entry *iter;

	ret = 0;
	counters = xt_counters_alloc(num_counters);
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	xt_table_unlock(t);

	get_old_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries;
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("arptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}

static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
{
	int ret;
	struct arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp),
				     tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}

static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct arpt_entry *iter;
	unsigned int addend;

	paddc = xt_copy_counters(arg, len, &tmp);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);

	t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;

	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
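
/* Everything from here to the matching #endif implements the compat
 * (32-bit userland on a 64-bit kernel) variants of the set/get paths:
 * compat_arpt_entry is smaller than arpt_entry, so entries are resized
 * and all offsets re-based in both directions.
 */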

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
struct compat_arpt_replace {
	char				name[XT_TABLE_MAXNAMELEN];
	u32				valid_hooks;
	u32				num_entries;
	u32				size;
	u32				hook_entry[NF_ARP_NUMHOOKS];
	u32				underflow[NF_ARP_NUMHOOKS];
	u32				num_counters;
	compat_uptr_t			counters;
	struct compat_arpt_entry	entries[];
};

static inline void compat_release_entry(struct compat_arpt_entry *e)
{
	struct xt_entry_target *t;

	t = compat_arpt_get_target(e);
	module_put(t->u.kernel.target->me);
}

static int
check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	int ret, off;

	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset < sizeof(struct compat_arpt_entry) +
			     sizeof(struct compat_xt_entry_target))
		return -EINVAL;

	if (!arp_checkentry(&e->arp))
		return -EINVAL;

	ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
					    e->next_offset);
	if (ret)
		return ret;

	off = sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);
	entry_offset = (void *)e - (void *)base;

	t = compat_arpt_get_target(e);
	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto out;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(NFPROTO_ARP, entry_offset, off);
	if (ret)
		goto release_target;

	return 0;

release_target:
	module_put(t->u.kernel.target->me);
out:
	return ret;
}

static void
compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct arpt_entry *de;
	unsigned int origsize;
	int h;

	origsize = *size;
	de = *dstptr;
	memcpy(de, e, sizeof(struct arpt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct arpt_entry);
	*size += sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_arpt_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
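
/* translate_compat_table() converts the whole compat blob into a
 * native one and then reuses translate_table() on the result, so the
 * native validation logic never needs to know about compat layouts.
 */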

static int translate_compat_table(struct net *net,
				  struct xt_table_info **pinfo,
				  void **pentry0,
				  const struct compat_arpt_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_arpt_entry *iter0;
	struct arpt_replace repl;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	j = 0;
	xt_compat_lock(NFPROTO_ARP);
	ret = xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
	if (ret)
		goto out_unlock;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != compatr->num_entries)
		goto out_unlock;

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	memset(newinfo->entries, 0, size);

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone */

	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);

	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_ARP);
	xt_compat_unlock(NFPROTO_ARP);
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}

static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
{
	int ret;
	struct compat_arpt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct arpt_entry *iter;

	if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp),
				     tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
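
/* The copy-out direction: each native entry shrinks by
 * sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry) plus
 * the target's own compat delta, and target_offset/next_offset are
 * rewritten to match the shrunken layout.
 */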

static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
				     compat_uint_t *size,
				     struct xt_counters *counters,
				     unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_arpt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	origsize = *size;
	ce = *dstptr;
	if (copy_to_user(ce, e, sizeof(struct arpt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
			 sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_arpt_entry);
	*size -= sizeof(struct arpt_entry) - sizeof(struct compat_arpt_entry);

	target_offset = e->target_offset - (origsize - *size);

	t = arpt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}

static int compat_copy_entries_to_user(unsigned int total_size,
				       struct xt_table *table,
				       void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct arpt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}
	vfree(counters);
	return ret;
}

struct compat_arpt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_arpt_entry entrytable[];
};

static int compat_get_entries(struct net *net,
			      struct compat_arpt_get_entries __user *uptr,
			      int *len)
{
	int ret;
	struct compat_arpt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct compat_arpt_get_entries) + get.size)
		return -EINVAL;

	get.name[sizeof(get.name) - 1] = '\0';

	xt_compat_lock(NFPROTO_ARP);
	t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;

		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size) {
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		} else if (!ret)
			ret = -EAGAIN;

		xt_compat_flush_offsets(NFPROTO_ARP);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	xt_compat_unlock(NFPROTO_ARP);
	return ret;
}
#endif
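
/* Entry points for the arptables sockopt interface (registered below
 * via arpt_sockopts).  Both directions require CAP_NET_ADMIN in the
 * socket's user namespace and dispatch to the compat variants when
 * the caller is a 32-bit task.
 */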

static int do_arpt_set_ctl(struct sock *sk, int cmd, sockptr_t arg,
			   unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_SET_REPLACE:
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
		if (in_compat_syscall())
			ret = compat_do_replace(sock_net(sk), arg, len);
		else
#endif
			ret = do_replace(sock_net(sk), arg, len);
		break;

	case ARPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), arg, len);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}

static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case ARPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len);
		break;

	case ARPT_SO_GET_ENTRIES:
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
		if (in_compat_syscall())
			ret = compat_get_entries(sock_net(sk), user, len);
		else
#endif
			ret = get_entries(sock_net(sk), user, len);
		break;

	case ARPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;

		try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name,
							 rev.revision, 1, &ret),
					"arpt_%s", rev.name);
		break;
	}

	default:
		ret = -EINVAL;
	}

	return ret;
}

static void __arpt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct arpt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}

int arpt_register_table(struct net *net,
			const struct xt_table *table,
			const struct arpt_replace *repl,
			const struct nf_hook_ops *template_ops)
{
	struct nf_hook_ops *ops;
	unsigned int num_ops;
	int ret, i;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		xt_free_table_info(newinfo);
		return PTR_ERR(new_table);
	}

	num_ops = hweight32(table->valid_hooks);
	if (num_ops == 0) {
		ret = -EINVAL;
		goto out_free;
	}

	ops = kmemdup(template_ops, sizeof(*ops) * num_ops, GFP_KERNEL);
	if (!ops) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < num_ops; i++)
		ops[i].priv = new_table;

	new_table->ops = ops;

	ret = nf_register_net_hooks(net, ops, num_ops);
	if (ret != 0)
		goto out_free;

	return ret;

out_free:
	__arpt_unregister_table(net, new_table);
	return ret;
}
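
/* Teardown is split in two: the pre_exit phase unregisters the
 * netfilter hooks so no packets can enter the table anymore, and the
 * exit phase below actually unregisters and frees the table.
 */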

void arpt_unregister_table_pre_exit(struct net *net, const char *name)
{
	struct xt_table *table = xt_find_table(net, NFPROTO_ARP, name);

	if (table)
		nf_unregister_net_hooks(net, table->ops,
					hweight32(table->valid_hooks));
}
EXPORT_SYMBOL(arpt_unregister_table_pre_exit);

void arpt_unregister_table(struct net *net, const char *name)
{
	struct xt_table *table = xt_find_table(net, NFPROTO_ARP, name);

	if (table)
		__arpt_unregister_table(net, table);
}

/* The built-in targets: standard (NULL) and error. */
static struct xt_target arpt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_ARP,
#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = arpt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_ARP,
	},
};

static struct nf_sockopt_ops arpt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= ARPT_BASE_CTL,
	.set_optmax	= ARPT_SO_SET_MAX+1,
	.set		= do_arpt_set_ctl,
	.get_optmin	= ARPT_BASE_CTL,
	.get_optmax	= ARPT_SO_GET_MAX+1,
	.get		= do_arpt_get_ctl,
	.owner		= THIS_MODULE,
};

static int __net_init arp_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_ARP);
}

static void __net_exit arp_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_ARP);
}

static struct pernet_operations arp_tables_net_ops = {
	.init = arp_tables_net_init,
	.exit = arp_tables_net_exit,
};

static int __init arp_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&arp_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
	if (ret < 0)
		goto err2;

	/* Register setsockopt */
	ret = nf_register_sockopt(&arpt_sockopts);
	if (ret < 0)
		goto err4;

	return 0;

err4:
	xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
err2:
	unregister_pernet_subsys(&arp_tables_net_ops);
err1:
	return ret;
}

static void __exit arp_tables_fini(void)
{
	nf_unregister_sockopt(&arpt_sockopts);
	xt_unregister_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
	unregister_pernet_subsys(&arp_tables_net_ops);
}

EXPORT_SYMBOL(arpt_register_table);
EXPORT_SYMBOL(arpt_unregister_table);
EXPORT_SYMBOL(arpt_do_table);

module_init(arp_tables_init);
module_exit(arp_tables_fini);