dev_addr_lists.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/dev_addr_lists.c - Functions for handling net device lists
 * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com>
 *
 * This file contains functions for working with unicast, multicast and device
 * address lists.
 */

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/export.h>
#include <linux/list.h>

#include "dev.h"

/*
 * General list handling functions
 */

static int __hw_addr_insert(struct netdev_hw_addr_list *list,
			    struct netdev_hw_addr *new, int addr_len)
{
	struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
	struct netdev_hw_addr *ha;

	while (*ins_point) {
		int diff;

		ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
		diff = memcmp(new->addr, ha->addr, addr_len);
		if (diff == 0)
			diff = memcmp(&new->type, &ha->type, sizeof(new->type));

		parent = *ins_point;
		if (diff < 0)
			ins_point = &parent->rb_left;
		else if (diff > 0)
			ins_point = &parent->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node_rcu(&new->node, parent, ins_point);
	rb_insert_color(&new->node, &list->tree);

	return 0;
}

static struct netdev_hw_addr*
__hw_addr_create(const unsigned char *addr, int addr_len,
		 unsigned char addr_type, bool global, bool sync)
{
	struct netdev_hw_addr *ha;
	int alloc_size;

	alloc_size = sizeof(*ha);
	if (alloc_size < L1_CACHE_BYTES)
		alloc_size = L1_CACHE_BYTES;
	ha = kmalloc(alloc_size, GFP_ATOMIC);
	if (!ha)
		return NULL;
	memcpy(ha->addr, addr, addr_len);
	ha->type = addr_type;
	ha->refcount = 1;
	ha->global_use = global;
	ha->synced = sync ? 1 : 0;
	ha->sync_cnt = 0;

	return ha;
}

static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
			    const unsigned char *addr, int addr_len,
			    unsigned char addr_type, bool global, bool sync,
			    int sync_count, bool exclusive)
{
	struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
	struct netdev_hw_addr *ha;

	if (addr_len > MAX_ADDR_LEN)
		return -EINVAL;

	while (*ins_point) {
		int diff;

		ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
		diff = memcmp(addr, ha->addr, addr_len);
		if (diff == 0)
			diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));

		parent = *ins_point;
		if (diff < 0) {
			ins_point = &parent->rb_left;
		} else if (diff > 0) {
			ins_point = &parent->rb_right;
		} else {
			if (exclusive)
				return -EEXIST;
			if (global) {
				/* check if addr is already used as global */
				if (ha->global_use)
					return 0;
				else
					ha->global_use = true;
			}
			if (sync) {
				if (ha->synced && sync_count)
					return -EEXIST;
				else
					ha->synced++;
			}
			ha->refcount++;
			return 0;
		}
	}

	ha = __hw_addr_create(addr, addr_len, addr_type, global, sync);
	if (!ha)
		return -ENOMEM;

	rb_link_node(&ha->node, parent, ins_point);
	rb_insert_color(&ha->node, &list->tree);

	list_add_tail_rcu(&ha->list, &list->list);
	list->count++;

	return 0;
}

static int __hw_addr_add(struct netdev_hw_addr_list *list,
			 const unsigned char *addr, int addr_len,
			 unsigned char addr_type)
{
	return __hw_addr_add_ex(list, addr, addr_len, addr_type, false, false,
				0, false);
}

static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
			       struct netdev_hw_addr *ha, bool global,
			       bool sync)
{
	if (global && !ha->global_use)
		return -ENOENT;

	if (sync && !ha->synced)
		return -ENOENT;

	if (global)
		ha->global_use = false;

	if (sync)
		ha->synced--;

	if (--ha->refcount)
		return 0;

	rb_erase(&ha->node, &list->tree);

	list_del_rcu(&ha->list);
	kfree_rcu(ha, rcu_head);
	list->count--;
	return 0;
}
static struct netdev_hw_addr *__hw_addr_lookup(struct netdev_hw_addr_list *list,
					       const unsigned char *addr, int addr_len,
					       unsigned char addr_type)
{
	struct rb_node *node;

	node = list->tree.rb_node;

	while (node) {
		struct netdev_hw_addr *ha = rb_entry(node, struct netdev_hw_addr, node);
		int diff = memcmp(addr, ha->addr, addr_len);

		if (diff == 0 && addr_type)
			diff = memcmp(&addr_type, &ha->type, sizeof(addr_type));

		if (diff < 0)
			node = node->rb_left;
		else if (diff > 0)
			node = node->rb_right;
		else
			return ha;
	}

	return NULL;
}

static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
			    const unsigned char *addr, int addr_len,
			    unsigned char addr_type, bool global, bool sync)
{
	struct netdev_hw_addr *ha = __hw_addr_lookup(list, addr, addr_len, addr_type);

	if (!ha)
		return -ENOENT;
	return __hw_addr_del_entry(list, ha, global, sync);
}

static int __hw_addr_del(struct netdev_hw_addr_list *list,
			 const unsigned char *addr, int addr_len,
			 unsigned char addr_type)
{
	return __hw_addr_del_ex(list, addr, addr_len, addr_type, false, false);
}

static int __hw_addr_sync_one(struct netdev_hw_addr_list *to_list,
			      struct netdev_hw_addr *ha,
			      int addr_len)
{
	int err;

	err = __hw_addr_add_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true, ha->sync_cnt, false);
	if (err && err != -EEXIST)
		return err;

	if (!err) {
		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}

static void __hw_addr_unsync_one(struct netdev_hw_addr_list *to_list,
				 struct netdev_hw_addr_list *from_list,
				 struct netdev_hw_addr *ha,
				 int addr_len)
{
	int err;

	err = __hw_addr_del_ex(to_list, ha->addr, addr_len, ha->type,
			       false, true);
	if (err)
		return;
	ha->sync_cnt--;
	/* address on from list is not marked synced */
	__hw_addr_del_entry(from_list, ha, false, false);
}

static int __hw_addr_sync_multiple(struct netdev_hw_addr_list *to_list,
				   struct netdev_hw_addr_list *from_list,
				   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt == ha->refcount) {
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
		} else {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		}
	}
	return err;
}
/* This function only works where there is a strict 1-1 relationship
 * between source and destination of the sync. If you ever need to
 * sync addresses to more than one destination, you need to use
 * __hw_addr_sync_multiple().
 */
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
		   struct netdev_hw_addr_list *from_list,
		   int addr_len)
{
	int err = 0;
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (!ha->sync_cnt) {
			err = __hw_addr_sync_one(to_list, ha, addr_len);
			if (err)
				break;
		} else if (ha->refcount == 1)
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
	}
	return err;
}
EXPORT_SYMBOL(__hw_addr_sync);

void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
		      struct netdev_hw_addr_list *from_list,
		      int addr_len)
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
		if (ha->sync_cnt)
			__hw_addr_unsync_one(to_list, from_list, ha, addr_len);
	}
}
EXPORT_SYMBOL(__hw_addr_unsync);

/**
 * __hw_addr_sync_dev - Synchronize device's multicast list
 * @list: address list to synchronize
 * @dev: device to sync
 * @sync: function to call if address should be added
 * @unsync: function to call if address should be removed
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of devices that require explicit address add/remove
 * notifications. The unsync function may be NULL in which case
 * the addresses requiring removal will simply be removed without
 * any notification to the device.
 **/
int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
		       struct net_device *dev,
		       int (*sync)(struct net_device *, const unsigned char *),
		       int (*unsync)(struct net_device *,
				     const unsigned char *))
{
	struct netdev_hw_addr *ha, *tmp;
	int err;

	/* first go through and flush out any stale entries */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt || ha->refcount != 1)
			continue;

		/* if unsync is defined and fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr))
			continue;

		ha->sync_cnt--;
		__hw_addr_del_entry(list, ha, false, false);
	}

	/* go through and sync new entries to the list */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (ha->sync_cnt)
			continue;

		err = sync(dev, ha->addr);
		if (err)
			return err;

		ha->sync_cnt++;
		ha->refcount++;
	}

	return 0;
}
EXPORT_SYMBOL(__hw_addr_sync_dev);
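/* Usage sketch (illustrative, not part of this file): a hypothetical driver's
 * ndo_set_rx_mode handler letting __hw_addr_sync_dev() drive its filter
 * programming through the __dev_uc_sync()/__dev_mc_sync() wrappers.
 * foo_add_filter() and foo_del_filter() are made-up hardware accessors.
 *
 *	static int foo_sync_addr(struct net_device *dev, const unsigned char *addr)
 *	{
 *		return foo_add_filter(netdev_priv(dev), addr);
 *	}
 *
 *	static int foo_unsync_addr(struct net_device *dev, const unsigned char *addr)
 *	{
 *		foo_del_filter(netdev_priv(dev), addr);
 *		return 0;
 *	}
 *
 *	static void foo_set_rx_mode(struct net_device *dev)
 *	{
 *		__dev_uc_sync(dev, foo_sync_addr, foo_unsync_addr);
 *		__dev_mc_sync(dev, foo_sync_addr, foo_unsync_addr);
 *	}
 */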
/**
 * __hw_addr_ref_sync_dev - Synchronize device's multicast address list taking
 *	into account references
 * @list: address list to synchronize
 * @dev: device to sync
 * @sync: function to call if address or reference on it should be added
 * @unsync: function to call if address or some reference on it should be removed
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of devices that require explicit address or reference
 * add/remove notifications. The unsync function may be NULL in which case
 * the addresses or references requiring removal will simply be
 * removed without any notification to the device. It is the responsibility
 * of the driver to identify and distribute addresses or references on them
 * between internal address tables.
 **/
int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
			   struct net_device *dev,
			   int (*sync)(struct net_device *,
				       const unsigned char *, int),
			   int (*unsync)(struct net_device *,
					 const unsigned char *, int))
{
	struct netdev_hw_addr *ha, *tmp;
	int err, ref_cnt;

	/* first go through and flush out any unsynced/stale entries */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		/* sync if address is not used */
		if ((ha->sync_cnt << 1) <= ha->refcount)
			continue;

		/* if fails defer unsyncing address */
		ref_cnt = ha->refcount - ha->sync_cnt;
		if (unsync && unsync(dev, ha->addr, ref_cnt))
			continue;

		ha->refcount = (ref_cnt << 1) + 1;
		ha->sync_cnt = ref_cnt;
		__hw_addr_del_entry(list, ha, false, false);
	}

	/* go through and sync updated/new entries to the list */
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		/* sync if address added or reused */
		if ((ha->sync_cnt << 1) >= ha->refcount)
			continue;

		ref_cnt = ha->refcount - ha->sync_cnt;
		err = sync(dev, ha->addr, ref_cnt);
		if (err)
			return err;

		ha->refcount = ref_cnt << 1;
		ha->sync_cnt = ref_cnt;
	}

	return 0;
}
EXPORT_SYMBOL(__hw_addr_ref_sync_dev);

/**
 * __hw_addr_ref_unsync_dev - Remove synchronized addresses and references on
 *	them from device
 * @list: address list to remove synchronized addresses (and references) from
 * @dev: device to sync
 * @unsync: function to call if address and references on it should be removed
 *
 * Remove all addresses that were added to the device by
 * __hw_addr_ref_sync_dev(). This function is intended to be called from the
 * ndo_stop or ndo_open functions on devices that require explicit address (or
 * references on it) add/remove notifications. If the unsync function pointer
 * is NULL then this function can be used to just reset the sync_cnt for the
 * addresses in the list.
 **/
void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
			      struct net_device *dev,
			      int (*unsync)(struct net_device *,
					    const unsigned char *, int))
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt)
			continue;

		/* if fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr, ha->sync_cnt))
			continue;

		ha->refcount -= ha->sync_cnt - 1;
		ha->sync_cnt = 0;
		__hw_addr_del_entry(list, ha, false, false);
	}
}
EXPORT_SYMBOL(__hw_addr_ref_unsync_dev);

/**
 * __hw_addr_unsync_dev - Remove synchronized addresses from device
 * @list: address list to remove synchronized addresses from
 * @dev: device to sync
 * @unsync: function to call if address should be removed
 *
 * Remove all addresses that were added to the device by __hw_addr_sync_dev().
 * This function is intended to be called from the ndo_stop or ndo_open
 * functions on devices that require explicit address add/remove
 * notifications. If the unsync function pointer is NULL then this function
 * can be used to just reset the sync_cnt for the addresses in the list.
 **/
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
			  struct net_device *dev,
			  int (*unsync)(struct net_device *,
					const unsigned char *))
{
	struct netdev_hw_addr *ha, *tmp;

	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		if (!ha->sync_cnt)
			continue;

		/* if unsync is defined and fails defer unsyncing address */
		if (unsync && unsync(dev, ha->addr))
			continue;

		ha->sync_cnt--;
		__hw_addr_del_entry(list, ha, false, false);
	}
}
EXPORT_SYMBOL(__hw_addr_unsync_dev);
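/* Usage sketch (illustrative, not part of this file): the matching teardown
 * path for the sketch above. A driver that syncs addresses from
 * ndo_set_rx_mode typically drops them again in ndo_stop through the
 * __dev_uc_unsync()/__dev_mc_unsync() wrappers around __hw_addr_unsync_dev().
 * foo_unsync_addr() is the hypothetical callback shown earlier.
 *
 *	static int foo_stop(struct net_device *dev)
 *	{
 *		__dev_uc_unsync(dev, foo_unsync_addr);
 *		__dev_mc_unsync(dev, foo_unsync_addr);
 *		return 0;
 *	}
 */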
static void __hw_addr_flush(struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha, *tmp;

	list->tree = RB_ROOT;
	list_for_each_entry_safe(ha, tmp, &list->list, list) {
		list_del_rcu(&ha->list);
		kfree_rcu(ha, rcu_head);
	}
	list->count = 0;
}

void __hw_addr_init(struct netdev_hw_addr_list *list)
{
	INIT_LIST_HEAD(&list->list);
	list->count = 0;
	list->tree = RB_ROOT;
}
EXPORT_SYMBOL(__hw_addr_init);

/*
 * Device address handling functions
 */

/* Check that netdev->dev_addr is not written to directly as this would
 * break the rbtree layout. All changes should go thru dev_addr_set() and co.
 * Remove this check in mid-2024.
 */
void dev_addr_check(struct net_device *dev)
{
	if (!memcmp(dev->dev_addr, dev->dev_addr_shadow, MAX_ADDR_LEN))
		return;

	netdev_warn(dev, "Current addr: %*ph\n", MAX_ADDR_LEN, dev->dev_addr);
	netdev_warn(dev, "Expected addr: %*ph\n",
		    MAX_ADDR_LEN, dev->dev_addr_shadow);
	netdev_WARN(dev, "Incorrect netdev->dev_addr\n");
}

/**
 * dev_addr_flush - Flush device address list
 * @dev: device
 *
 * Flush device address list and reset ->dev_addr.
 *
 * The caller must hold the rtnl_mutex.
 */
void dev_addr_flush(struct net_device *dev)
{
	/* rtnl_mutex must be held here */
	dev_addr_check(dev);

	__hw_addr_flush(&dev->dev_addrs);
	dev->dev_addr = NULL;
}

/**
 * dev_addr_init - Init device address list
 * @dev: device
 *
 * Init device address list and create the first element,
 * used by ->dev_addr.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_init(struct net_device *dev)
{
	unsigned char addr[MAX_ADDR_LEN];
	struct netdev_hw_addr *ha;
	int err;

	/* rtnl_mutex must be held here */

	__hw_addr_init(&dev->dev_addrs);
	memset(addr, 0, sizeof(addr));
	err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
			    NETDEV_HW_ADDR_T_LAN);
	if (!err) {
		/*
		 * Get the first (previously created) address from the list
		 * and set dev_addr pointer to this location.
		 */
		ha = list_first_entry(&dev->dev_addrs.list,
				      struct netdev_hw_addr, list);
		dev->dev_addr = ha->addr;
	}
	return err;
}

void dev_addr_mod(struct net_device *dev, unsigned int offset,
		  const void *addr, size_t len)
{
	struct netdev_hw_addr *ha;

	dev_addr_check(dev);

	ha = container_of(dev->dev_addr, struct netdev_hw_addr, addr[0]);
	rb_erase(&ha->node, &dev->dev_addrs.tree);
	memcpy(&ha->addr[offset], addr, len);
	memcpy(&dev->dev_addr_shadow[offset], addr, len);
	WARN_ON(__hw_addr_insert(&dev->dev_addrs, ha, dev->addr_len));
}
EXPORT_SYMBOL(dev_addr_mod);
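/* Usage sketch (illustrative, not part of this file): because writing to
 * netdev->dev_addr directly would break the rbtree (see dev_addr_check()),
 * drivers change the address through dev_addr_set()/eth_hw_addr_set(), which
 * end up in dev_addr_mod(). foo_read_mac_from_hw() is a made-up helper.
 *
 *	u8 mac[ETH_ALEN];
 *
 *	foo_read_mac_from_hw(priv, mac);
 *	if (is_valid_ether_addr(mac))
 *		eth_hw_addr_set(dev, mac);
 *	else
 *		eth_hw_addr_random(dev);
 */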
/**
 * dev_addr_add - Add a device address
 * @dev: device
 * @addr: address to add
 * @addr_type: address type
 *
 * Add a device address to the device or increase the reference count if
 * it already exists.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_add(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;

	ASSERT_RTNL();

	err = dev_pre_changeaddr_notify(dev, addr, NULL);
	if (err)
		return err;
	err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_add);

/**
 * dev_addr_del - Release a device address.
 * @dev: device
 * @addr: address to delete
 * @addr_type: address type
 *
 * Release reference to a device address and remove it from the device
 * if the reference count drops to zero.
 *
 * The caller must hold the rtnl_mutex.
 */
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
		 unsigned char addr_type)
{
	int err;
	struct netdev_hw_addr *ha;

	ASSERT_RTNL();

	/*
	 * We can not remove the first address from the list because
	 * dev->dev_addr points to that.
	 */
	ha = list_first_entry(&dev->dev_addrs.list,
			      struct netdev_hw_addr, list);
	if (!memcmp(ha->addr, addr, dev->addr_len) &&
	    ha->type == addr_type && ha->refcount == 1)
		return -ENOENT;

	err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
			    addr_type);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_addr_del);

/*
 * Unicast list handling functions
 */

/**
 * dev_uc_add_excl - Add a global secondary unicast address
 * @dev: device
 * @addr: address to add
 */
int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add_ex(&dev->uc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_UNICAST, true, false,
			       0, true);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_add_excl);

/**
 * dev_uc_add - Add a secondary unicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a secondary unicast address to the device or increase
 * the reference count if it already exists.
 */
int dev_uc_add(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_add);
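/* Usage sketch (illustrative, not part of this file): an upper device (in the
 * style of macvlan) asking its lower device to accept frames for one extra
 * unicast MAC instead of going promiscuous, and dropping it again on close.
 * "lowerdev" is a hypothetical lower net_device pointer.
 *
 *	err = dev_uc_add(lowerdev, dev->dev_addr);
 *	if (err < 0)
 *		return err;
 *	...
 *	dev_uc_del(lowerdev, dev->dev_addr);
 */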
/**
 * dev_uc_del - Release secondary unicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a secondary unicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_uc_del(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
			    NETDEV_HW_ADDR_T_UNICAST);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_uc_del);

/**
 * dev_uc_sync - Synchronize device's unicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the dev->set_rx_mode
 * function of layered software devices. This function assumes that
 * addresses will only ever be synced to the @to device and no other.
 */
int dev_uc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync);

/**
 * dev_uc_sync_multiple - Synchronize device's unicast list to another
 *	device, but allow for multiple calls to sync to multiple devices.
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have been deleted from the source. The source device
 * must be locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the dev->set_rx_mode
 * function of layered software devices. It allows for a single source
 * device to be synced to multiple destination devices.
 */
int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_uc_sync_multiple);

/**
 * dev_uc_unsync - Remove synchronized addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_uc_sync(). This function is intended to be called from the
 * dev->stop function of layered software devices.
 */
void dev_uc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	/* netif_addr_lock_bh() uses lockdep subclass 0, this is okay for two
	 * reasons:
	 * 1) This is always called without any addr_list_lock, so as the
	 *    outermost one here, it must be 0.
	 * 2) This is called by some callers after unlinking the upper device,
	 *    so the dev->lower_level becomes 1 again.
	 * Therefore, the subclass for 'from' is 0, for 'to' is either 1 or
	 * larger.
	 */
	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_uc_unsync);
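/* Usage sketch (illustrative, not part of this file): a layered software
 * device propagating its address lists to the real device below it, as
 * macvlan/vlan style drivers do. "lowerdev" is the hypothetical lower device.
 *
 *	static void foo_upper_set_rx_mode(struct net_device *dev)
 *	{
 *		struct foo_upper *priv = netdev_priv(dev);
 *
 *		dev_uc_sync(priv->lowerdev, dev);
 *		dev_mc_sync(priv->lowerdev, dev);
 *	}
 *
 * and, from the upper device's ndo_stop:
 *
 *	dev_uc_unsync(priv->lowerdev, dev);
 *	dev_mc_unsync(priv->lowerdev, dev);
 */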
/**
 * dev_uc_flush - Flush unicast addresses
 * @dev: device
 *
 * Flush unicast addresses.
 */
void dev_uc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->uc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_uc_flush);

/**
 * dev_uc_init - Init unicast address list
 * @dev: device
 *
 * Init unicast address list.
 */
void dev_uc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->uc);
}
EXPORT_SYMBOL(dev_uc_init);

/*
 * Multicast list handling functions
 */

/**
 * dev_mc_add_excl - Add a global secondary multicast address
 * @dev: device
 * @addr: address to add
 */
int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, true, false,
			       0, true);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}
EXPORT_SYMBOL(dev_mc_add_excl);

static int __dev_mc_add(struct net_device *dev, const unsigned char *addr,
			bool global)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, global, false,
			       0, false);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}

/**
 * dev_mc_add - Add a multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a multicast address to the device or increase
 * the reference count if it already exists.
 */
int dev_mc_add(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_add(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_add);

/**
 * dev_mc_add_global - Add a global multicast address
 * @dev: device
 * @addr: address to add
 *
 * Add a global multicast address to the device.
 */
int dev_mc_add_global(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_add(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_add_global);
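/* Usage sketch (illustrative, not part of this file): joining and leaving a
 * link-layer multicast group on an interface. The group address is just an
 * example value (the Ethernet mapping of IPv4 224.0.0.251).
 *
 *	static const unsigned char mc_addr[ETH_ALEN] =
 *		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
 *
 *	dev_mc_add(dev, mc_addr);
 *	...
 *	dev_mc_del(dev, mc_addr);
 */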
static int __dev_mc_del(struct net_device *dev, const unsigned char *addr,
			bool global)
{
	int err;

	netif_addr_lock_bh(dev);
	err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
			       NETDEV_HW_ADDR_T_MULTICAST, global, false);
	if (!err)
		__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
	return err;
}

/**
 * dev_mc_del - Delete a multicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a multicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_mc_del(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_del(dev, addr, false);
}
EXPORT_SYMBOL(dev_mc_del);

/**
 * dev_mc_del_global - Delete a global multicast address.
 * @dev: device
 * @addr: address to delete
 *
 * Release reference to a multicast address and remove it
 * from the device if the reference count drops to zero.
 */
int dev_mc_del_global(struct net_device *dev, const unsigned char *addr)
{
	return __dev_mc_del(dev, addr, true);
}
EXPORT_SYMBOL(dev_mc_del_global);

/**
 * dev_mc_sync - Synchronize device's multicast list to another device
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of layered software devices.
 */
int dev_mc_sync(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync);

/**
 * dev_mc_sync_multiple - Synchronize device's multicast list to another
 *	device, but allow for multiple calls to sync to multiple devices.
 * @to: destination device
 * @from: source device
 *
 * Add newly added addresses to the destination device and release
 * addresses that have no users left. The source device must be
 * locked by netif_addr_lock_bh.
 *
 * This function is intended to be called from the ndo_set_rx_mode
 * function of layered software devices. It allows for a single
 * source device to be synced to multiple destination devices.
 */
int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
{
	int err = 0;

	if (to->addr_len != from->addr_len)
		return -EINVAL;

	netif_addr_lock(to);
	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
	if (!err)
		__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	return err;
}
EXPORT_SYMBOL(dev_mc_sync_multiple);

/**
 * dev_mc_unsync - Remove synchronized addresses from the destination device
 * @to: destination device
 * @from: source device
 *
 * Remove all addresses that were added to the destination device by
 * dev_mc_sync(). This function is intended to be called from the
 * dev->stop function of layered software devices.
 */
void dev_mc_unsync(struct net_device *to, struct net_device *from)
{
	if (to->addr_len != from->addr_len)
		return;

	/* See the above comments inside dev_uc_unsync(). */
	netif_addr_lock_bh(from);
	netif_addr_lock(to);
	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
	__dev_set_rx_mode(to);
	netif_addr_unlock(to);
	netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_mc_unsync);

/**
 * dev_mc_flush - Flush multicast addresses
 * @dev: device
 *
 * Flush multicast addresses.
 */
void dev_mc_flush(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__hw_addr_flush(&dev->mc);
	netif_addr_unlock_bh(dev);
}
EXPORT_SYMBOL(dev_mc_flush);

/**
 * dev_mc_init - Init multicast address list
 * @dev: device
 *
 * Init multicast address list.
 */
void dev_mc_init(struct net_device *dev)
{
	__hw_addr_init(&dev->mc);
}
EXPORT_SYMBOL(dev_mc_init);