dsa2.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <net/devlink.h>
#include <net/sch_generic.h>

#include "dsa_priv.h"

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

/* Track the bridges with forwarding offload enabled */
static unsigned long dsa_fwd_offloading_bridges;

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The other alternative of traversing the tree is only
 * through its ports list, which does not uniquely list the switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}

/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}

/**
 * dsa_lag_map() - Map LAG structure to a linear LAG array
 * @dst: Tree in which to record the mapping.
 * @lag: LAG structure that is to be mapped to the tree's array.
 *
 * dsa_lag_id/dsa_lag_by_id can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	for (id = 1; id <= dst->lags_len; id++) {
		if (!dsa_lag_by_id(dst, id)) {
			dst->lags[id - 1] = lag;
			lag->id = id;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}
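
/* For illustration: after a successful dsa_lag_map(), IDs are 1-based
 * while the array is 0-based, so dst->lags[lag->id - 1] == lag and
 * dsa_lag_by_id(dst, lag->id) translates back to the same struct
 * dsa_lag. A hypothetical consumer might rely on this as follows:
 *
 *	dsa_lag_map(dst, lag);
 *	if (lag->id)				// 0: no hardware ID left
 *		foo_program_lag(ds, lag->id - 1);	// hypothetical driver helper
 */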

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: LAG structure that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag)
{
	unsigned int id;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_by_id(dst, id) == lag) {
			dst->lags[id - 1] = NULL;
			lag->id = 0;
			break;
		}
	}
}

struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree *dst,
				  const struct net_device *lag_dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_lag_dev_get(dp) == lag_dev)
			return dp->lag;

	return NULL;
}

struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_bridge_dev_get(dp) == br)
			return dp->bridge;

	return NULL;
}

static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		struct dsa_bridge *bridge;

		bridge = dsa_tree_bridge_find(dst, bridge_dev);
		if (bridge)
			return bridge->num;
	}

	return 0;
}

unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
	unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);

	/* Switches without FDB isolation support don't get unique
	 * bridge numbering
	 */
	if (!max)
		return 0;

	if (!bridge_num) {
		/* First port that requests FDB isolation or TX forwarding
		 * offload for this bridge
		 */
		bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
						DSA_MAX_NUM_OFFLOADING_BRIDGES,
						1);
		if (bridge_num >= max)
			return 0;

		set_bit(bridge_num, &dsa_fwd_offloading_bridges);
	}

	return bridge_num;
}

void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num)
{
	/* Since we refcount bridges, we know that when we call this function
	 * it is no longer in use, so we can just go ahead and remove it from
	 * the bit mask.
	 */
	clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
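
/* For illustration: bridge numbers come from a global bitmap and are
 * allocated starting at 1, with 0 meaning "no forwarding offload". A
 * hypothetical caller-side sequence (assuming the switch advertises its
 * limit via something like ds->max_num_bridges) could look like:
 *
 *	num = dsa_bridge_num_get(br, ds->max_num_bridges);
 *	if (!num)
 *		return;			// fall back to software forwarding
 *	...
 *	dsa_bridge_num_put(br, num);	// when the last port leaves the bridge
 */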

struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
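
/* For illustration, the "link" property iterated above comes from the DSA
 * device tree binding; a hypothetical DSA port on switch0 pointing at its
 * peer port on switch1 might look like:
 *
 *	port@5 {
 *		reg = <5>;
 *		link = <&switch1port6>;
 *	};
 *
 * Routing table setup only completes once every such phandle resolves to
 * a port that has already been enumerated in this tree.
 */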

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

/* Assign the default CPU port (the first one in the tree) to all ports of the
 * fabric which don't already have one as part of their own switch.
 */
static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->cpu_dp)
			continue;

		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;
	}

	return 0;
}

/* Perform initial assignment of CPU ports to user ports and DSA links in the
 * fabric, giving preference to CPU ports local to each switch. Default to
 * using the first CPU port in the switch tree if the port does not have a CPU
 * port local to this switch.
 */
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	list_for_each_entry(cpu_dp, &dst->ports, list) {
		if (!dsa_port_is_cpu(cpu_dp))
			continue;

		/* Prefer a local CPU port */
		dsa_switch_for_each_port(dp, cpu_dp->ds) {
			/* Prefer the first local CPU port found */
			if (dp->cpu_dp)
				continue;

			if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
				dp->cpu_dp = cpu_dp;
		}
	}

	return dsa_tree_setup_default_cpu(dst);
}

static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}
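
/* For illustration, in a hypothetical two-switch tree where only switch0
 * has a CPU port, the assignment above works out as:
 *
 *	switch0 user/DSA ports -> switch0's CPU port (local match)
 *	switch1 user/DSA ports -> switch0's CPU port (tree-wide default)
 *
 * i.e. dsa_tree_setup_cpu_ports() first satisfies ports that share a
 * switch with a CPU port, and dsa_tree_setup_default_cpu() mops up the
 * rest with the first CPU port found in the tree.
 */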

static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	dp->setup = true;

	return 0;
}

static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	const unsigned char *id;
	unsigned char len;
	int err;

	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;
	memset(dlp, 0, sizeof(*dlp));

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);

	if (!err)
		dp->devlink_port_setup = true;

	return err;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;

	if (!dp->setup)
		return;

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	dp->setup = false;
}

static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	if (dp->devlink_port_setup)
		devlink_port_unregister(dlp);
	dp->devlink_port_setup = false;
}

/* Destroy the current devlink port, and create a new one which has the UNUSED
 * flavour. At this point, any call to ds->ops->port_setup has been already
 * balanced out by a call to ds->ops->port_teardown, so we know that any
 * devlink port regions the driver had are now unregistered. We then call its
 * ds->ops->port_setup again, in order for the driver to re-create them on the
 * new devlink port.
 */
static int dsa_port_reinit_as_unused(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dsa_port_devlink_teardown(dp);
	dp->type = DSA_PORT_TYPE_UNUSED;
	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	if (ds->ops->port_setup) {
		/* On error, leave the devlink port registered,
		 * dsa_switch_teardown will clean it up later.
		 */
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_devlink_info_get(struct devlink *dl,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (ds->ops->devlink_info_get)
		return ds->ops->devlink_info_get(ds, req, extack);

	return -EOPNOTSUPP;
}

static int dsa_devlink_sb_pool_get(struct devlink *dl,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
					    pool_info);
}

static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
					    threshold_type, extack);
}

static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
						 pool_index, p_threshold);
}

static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
						 pool_index, threshold, extack);
}

static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 *p_pool_index, u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
						    tc_index, pool_type,
						    p_pool_index, p_threshold);
}

static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 pool_index, u32 threshold,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
						    tc_index, pool_type,
						    pool_index, threshold,
						    extack);
}

static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
				       unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_snapshot)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}

static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
					unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_max_clear)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}

static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
					    unsigned int sb_index,
					    u16 pool_index, u32 *p_cur,
					    u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
						     pool_index, p_cur, p_max);
}

static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
				    unsigned int sb_index, u16 tc_index,
				    enum devlink_sb_pool_type pool_type,
				    u32 *p_cur, u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
							sb_index, tc_index,
							pool_type, p_cur,
							p_max);
}

static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};

static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int err;

	if (tag_ops->proto == dst->default_proto)
		goto connect;

	rtnl_lock();
	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	rtnl_unlock();
	if (err) {
		dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
			tag_ops->name, ERR_PTR(err));
		return err;
	}

connect:
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}
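
/* For illustration, a hypothetical driver that can follow the tagger
 * chosen for the tree would expose something like:
 *
 *	static const struct dsa_switch_ops foo_switch_ops = {
 *		.get_tag_protocol	= foo_get_tag_protocol,
 *		.change_tag_protocol	= foo_change_tag_protocol,
 *		.connect_tag_protocol	= foo_connect_tag_protocol,
 *		...
 *	};
 *
 * change_tag_protocol is only called above when the tree's tagger differs
 * from the protocol the driver reported as its default, and
 * connect_tag_protocol lets the driver exchange state with taggers that
 * need it (tag_ops->connect/disconnect).
 */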

static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct device_node *dn;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	/* Setup devlink port instances now, so that the switch
	 * setup() can register regions etc, against the ports
	 */
	dsa_switch_for_each_port(dp, ds) {
		err = dsa_port_devlink_setup(dp);
		if (err)
			goto unregister_devlink_ports;
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		dn = of_get_child_by_name(ds->dev->of_node, "mdio");

		err = of_mdiobus_register(ds->slave_mii_bus, dn);
		of_node_put(dn);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	dsa_switch_for_each_port(dp, ds)
		dsa_port_devlink_teardown(dp);
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	if (!ds->setup)
		return;

	if (ds->devlink)
		devlink_unregister(ds->devlink);

	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	if (ds->devlink) {
		dsa_switch_for_each_port(dp, ds)
			dsa_port_devlink_teardown(dp);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				err = dsa_port_reinit_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err) {
			dsa_tree_teardown_switches(dst);
			break;
		}
	}

	return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	rtnl_lock();

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_cpu(dp)) {
			struct net_device *master = dp->master;
			bool admin_up = (master->flags & IFF_UP) &&
					!qdisc_tx_is_noop(master);

			err = dsa_master_setup(master, dp);
			if (err)
				break;

			/* Replay master state event */
			dsa_tree_master_admin_state_change(dst, master, admin_up);
			dsa_tree_master_oper_state_change(dst, master,
							  netif_oper_up(master));
		}
	}

	rtnl_unlock();

	return err;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	rtnl_lock();

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_cpu(dp)) {
			struct net_device *master = dp->master;

			/* Synthesizing an "admin down" state is sufficient for
			 * the switches to get a notification if the master is
			 * currently up and running.
			 */
			dsa_tree_master_admin_state_change(dst, master, false);

			dsa_master_teardown(master);
		}
	}

	rtnl_unlock();
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_ports;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}
"dsa,member" values). 1239 */ 1240int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, 1241 struct net_device *master, 1242 const struct dsa_device_ops *tag_ops, 1243 const struct dsa_device_ops *old_tag_ops) 1244{ 1245 struct dsa_notifier_tag_proto_info info; 1246 struct dsa_port *dp; 1247 int err = -EBUSY; 1248 1249 if (!rtnl_trylock()) 1250 return restart_syscall(); 1251 1252 /* At the moment we don't allow changing the tag protocol under 1253 * traffic. The rtnl_mutex also happens to serialize concurrent 1254 * attempts to change the tagging protocol. If we ever lift the IFF_UP 1255 * restriction, there needs to be another mutex which serializes this. 1256 */ 1257 if (master->flags & IFF_UP) 1258 goto out_unlock; 1259 1260 list_for_each_entry(dp, &dst->ports, list) { 1261 if (!dsa_port_is_user(dp)) 1262 continue; 1263 1264 if (dp->slave->flags & IFF_UP) 1265 goto out_unlock; 1266 } 1267 1268 /* Notify the tag protocol change */ 1269 info.tag_ops = tag_ops; 1270 err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info); 1271 if (err) 1272 goto out_unwind_tagger; 1273 1274 err = dsa_tree_bind_tag_proto(dst, tag_ops); 1275 if (err) 1276 goto out_unwind_tagger; 1277 1278 rtnl_unlock(); 1279 1280 return 0; 1281 1282out_unwind_tagger: 1283 info.tag_ops = old_tag_ops; 1284 dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info); 1285out_unlock: 1286 rtnl_unlock(); 1287 return err; 1288} 1289 1290static void dsa_tree_master_state_change(struct dsa_switch_tree *dst, 1291 struct net_device *master) 1292{ 1293 struct dsa_notifier_master_state_info info; 1294 struct dsa_port *cpu_dp = master->dsa_ptr; 1295 1296 info.master = master; 1297 info.operational = dsa_port_master_is_operational(cpu_dp); 1298 1299 dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info); 1300} 1301 1302void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst, 1303 struct net_device *master, 1304 bool up) 1305{ 1306 struct dsa_port *cpu_dp = master->dsa_ptr; 1307 bool notify = false; 1308 1309 if ((dsa_port_master_is_operational(cpu_dp)) != 1310 (up && cpu_dp->master_oper_up)) 1311 notify = true; 1312 1313 cpu_dp->master_admin_up = up; 1314 1315 if (notify) 1316 dsa_tree_master_state_change(dst, master); 1317} 1318 1319void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst, 1320 struct net_device *master, 1321 bool up) 1322{ 1323 struct dsa_port *cpu_dp = master->dsa_ptr; 1324 bool notify = false; 1325 1326 if ((dsa_port_master_is_operational(cpu_dp)) != 1327 (cpu_dp->master_admin_up && up)) 1328 notify = true; 1329 1330 cpu_dp->master_oper_up = up; 1331 1332 if (notify) 1333 dsa_tree_master_state_change(dst, master); 1334} 1335 1336static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index) 1337{ 1338 struct dsa_switch_tree *dst = ds->dst; 1339 struct dsa_port *dp; 1340 1341 dsa_switch_for_each_port(dp, ds) 1342 if (dp->index == index) 1343 return dp; 1344 1345 dp = kzalloc(sizeof(*dp), GFP_KERNEL); 1346 if (!dp) 1347 return NULL; 1348 1349 dp->ds = ds; 1350 dp->index = index; 1351 1352 mutex_init(&dp->addr_lists_lock); 1353 mutex_init(&dp->vlans_lock); 1354 INIT_LIST_HEAD(&dp->fdbs); 1355 INIT_LIST_HEAD(&dp->mdbs); 1356 INIT_LIST_HEAD(&dp->vlans); 1357 INIT_LIST_HEAD(&dp->list); 1358 list_add_tail(&dp->list, &dst->ports); 1359 1360 return dp; 1361} 1362 1363static int dsa_port_parse_user(struct dsa_port *dp, const char *name) 1364{ 1365 if (!name) 1366 name = "eth%d"; 1367 1368 dp->type = DSA_PORT_TYPE_USER; 1369 dp->name = name; 1370 1371 return 0; 1372} 1373 1374static int 

static void dsa_tree_master_state_change(struct dsa_switch_tree *dst,
					 struct net_device *master)
{
	struct dsa_notifier_master_state_info info;
	struct dsa_port *cpu_dp = master->dsa_ptr;

	info.master = master;
	info.operational = dsa_port_master_is_operational(cpu_dp);

	dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info);
}

void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst,
					struct net_device *master,
					bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (up && cpu_dp->master_oper_up))
		notify = true;

	cpu_dp->master_admin_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}

void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst,
				       struct net_device *master,
				       bool up)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	bool notify = false;

	if ((dsa_port_master_is_operational(cpu_dp)) !=
	    (cpu_dp->master_admin_up && up))
		notify = true;

	cpu_dp->master_oper_up = up;

	if (notify)
		dsa_tree_master_state_change(dst, master);
}

static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	mutex_init(&dp->addr_lists_lock);
	mutex_init(&dp->vlans_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);
	INIT_LIST_HEAD(&dp->vlans);
	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	if (!name)
		name = "eth%d";

	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another; when that
	 * happens, the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const struct dsa_device_ops *tag_ops;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
	} else {
		tag_ops = dsa_tag_driver_get(default_proto);
	}

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}
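
/* For illustration, the user_protocol override handled above comes from
 * the "dsa-tag-protocol" property of the CPU port node, read in
 * dsa_port_parse_of() below; a hypothetical example:
 *
 *	port@0 {
 *		reg = <0>;
 *		ethernet = <&gmac0>;
 *		dsa-tag-protocol = "edsa";
 *	};
 *
 * The string must match a tagger known to dsa_find_tagger_by_name(), and
 * the switch driver must implement .change_tag_protocol for the override
 * to be accepted.
 */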

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;
		const char *user_protocol;

		master = of_find_net_device_by_node(ethernet);
		of_node_put(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, master, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	if (ds->dst->last_switch < ds->index)
		ds->dst->last_switch = ds->index;

	return 0;
}
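
/* For illustration, the optional "dsa,member" property above encodes
 * <tree-index switch-index>; a hypothetical second switch in tree 0
 * would carry:
 *
 *	switch1: switch@1 {
 *		dsa,member = <0 1>;
 *		...
 *	};
 *
 * Switches that omit the property default to <0 0>, which is why a
 * single-switch system needs no "dsa,member" at all.
 */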

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp, *next;

	dsa_switch_for_each_port_safe(dp, next, ds) {
		WARN_ON(!list_empty(&dp->fdbs));
		WARN_ON(!list_empty(&dp->mdbs));
		WARN_ON(!list_empty(&dp->vlans));
		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);

/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);

	if (!ds->setup)
		goto out;

	rtnl_lock();

	dsa_switch_for_each_user_port(dp, ds) {
		master = dp->cpu_dp->master;
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
	}

	/* Disconnect from further netdevice notifiers on the master,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->master->dsa_ptr = NULL;

	rtnl_unlock();
out:
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
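
/* For illustration, a hypothetical switch driver would pair the exported
 * entry points of this file with its bus driver callbacks roughly like:
 *
 *	static int foo_probe(struct mdio_device *mdiodev)
 *	{
 *		...
 *		return dsa_register_switch(priv->ds);
 *	}
 *
 *	static void foo_remove(struct mdio_device *mdiodev)
 *	{
 *		dsa_unregister_switch(priv->ds);
 *	}
 *
 *	static void foo_shutdown(struct mdio_device *mdiodev)
 *	{
 *		dsa_switch_shutdown(priv->ds);
 *	}
 *
 * where "foo" and "priv" are stand-ins; the important part is that
 * .shutdown calls dsa_switch_shutdown() rather than a full unregister.
 */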