// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa.c - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 */

#include <linux/device.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sysfs.h>
#include <linux/ptp_classify.h>

#include "dsa_priv.h"

static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);

static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* Just return the original SKB */
	return skb;
}

static const struct dsa_device_ops none_ops = {
	.name	= "none",
	.proto	= DSA_TAG_PROTO_NONE,
	.xmit	= dsa_slave_notag_xmit,
	.rcv	= NULL,
};

DSA_TAG_DRIVER(none_ops);

static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
				    struct module *owner)
{
	dsa_tag_driver->owner = owner;

	mutex_lock(&dsa_tag_drivers_lock);
	list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
			      unsigned int count, struct module *owner)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);

static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
{
	mutex_lock(&dsa_tag_drivers_lock);
	list_del(&dsa_tag_driver->list);
	mutex_unlock(&dsa_tag_drivers_lock);
}

void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		dsa_tag_driver_unregister(dsa_tag_driver_array[i]);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);

const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
{
	return ops->name;
}
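/* Usage sketch (hypothetical tagger names, for illustration only): a tagging
 * protocol module wraps its dsa_device_ops with DSA_TAG_DRIVER() and lets
 * module_dsa_tag_drivers() emit the init/exit boilerplate that calls
 * dsa_tag_drivers_register()/dsa_tag_drivers_unregister():
 *
 *	DSA_TAG_DRIVER(example_netdev_ops);
 *
 *	static struct dsa_tag_driver *example_tag_drivers[] = {
 *		&DSA_TAG_DRIVER_NAME(example_netdev_ops),
 *	};
 *	module_dsa_tag_drivers(example_tag_drivers);
 */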
/* Function takes a reference on the module owning the tagger,
 * so dsa_tag_driver_put() must be called afterwards.
 */
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf)
{
	const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		const struct dsa_device_ops *tmp = dsa_tag_driver->ops;

		if (!sysfs_streq(buf, tmp->name))
			continue;

		if (!try_module_get(dsa_tag_driver->owner))
			break;

		ops = tmp;
		break;
	}
	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol)
{
	struct dsa_tag_driver *dsa_tag_driver;
	const struct dsa_device_ops *ops;
	bool found = false;

	request_module("%s%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		ops = dsa_tag_driver->ops;
		if (ops->proto == tag_protocol) {
			found = true;
			break;
		}
	}

	if (found) {
		if (!try_module_get(dsa_tag_driver->owner))
			ops = ERR_PTR(-ENOPROTOOPT);
	} else {
		ops = ERR_PTR(-ENOPROTOOPT);
	}

	mutex_unlock(&dsa_tag_drivers_lock);

	return ops;
}

void dsa_tag_driver_put(const struct dsa_device_ops *ops)
{
	struct dsa_tag_driver *dsa_tag_driver;

	mutex_lock(&dsa_tag_drivers_lock);
	list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
		if (dsa_tag_driver->ops == ops) {
			module_put(dsa_tag_driver->owner);
			break;
		}
	}
	mutex_unlock(&dsa_tag_drivers_lock);
}

static int dev_is_class(struct device *dev, void *class)
{
	if (dev->class != NULL && !strcmp(dev->class->name, class))
		return 1;

	return 0;
}

static struct device *dev_find_class(struct device *parent, char *class)
{
	if (dev_is_class(parent, class)) {
		get_device(parent);
		return parent;
	}

	return device_find_child(parent, class, dev_is_class);
}

struct net_device *dsa_dev_to_net_device(struct device *dev)
{
	struct device *d;

	d = dev_find_class(dev, "net");
	if (d != NULL) {
		struct net_device *nd;

		nd = to_net_dev(d);
		dev_hold(nd);
		put_device(d);

		return nd;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);
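/* Note: dsa_dev_to_net_device() above returns its result with a reference
 * taken via dev_hold(); callers are expected to balance it with dev_put()
 * once they are done with the net_device.
 */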
/* Determine if we should defer delivery of skb until we have a rx timestamp.
 *
 * Called from dsa_switch_rcv. For now, this will only work if tagging is
 * enabled on the switch. Normally the MAC driver would retrieve the hardware
 * timestamp when it reads the packet out of the hardware. However in a DSA
 * switch, the DSA driver owning the interface to which the packet is
 * delivered is never notified unless we do so here.
 */
static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
				       struct sk_buff *skb)
{
	struct dsa_switch *ds = p->dp->ds;
	unsigned int type;

	if (skb_headroom(skb) < ETH_HLEN)
		return false;

	__skb_push(skb, ETH_HLEN);

	type = ptp_classify_raw(skb);

	__skb_pull(skb, ETH_HLEN);

	if (type == PTP_CLASS_NONE)
		return false;

	if (likely(ds->ops->port_rxtstamp))
		return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);

	return false;
}

static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct dsa_slave_priv *p;

	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	nskb = cpu_dp->rcv(skb, dev);
	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (unlikely(!dsa_slave_dev_check(skb->dev))) {
		/* Packet is to be injected directly on an upper
		 * device, e.g. a team/bond, so skip all DSA-port
		 * specific actions.
		 */
		netif_rx(skb);
		return 0;
	}

	p = netdev_priv(skb->dev);

	if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
		nskb = dsa_untag_bridge_pvid(skb);
		if (!nskb) {
			kfree_skb(skb);
			return 0;
		}
		skb = nskb;
	}

	dev_sw_netstats_rx_add(skb->dev, skb->len);

	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	gro_cells_receive(&p->gcells, skb);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static bool dsa_port_is_initialized(const struct dsa_port *dp)
{
	return dp->type == DSA_PORT_TYPE_USER && dp->slave;
}

int dsa_switch_suspend(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	/* Suspend slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_suspend(dp->slave);
		if (ret)
			return ret;
	}

	if (ds->ops->suspend)
		ret = ds->ops->suspend(ds);

	return ret;
}
EXPORT_SYMBOL_GPL(dsa_switch_suspend);

int dsa_switch_resume(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int ret = 0;

	if (ds->ops->resume)
		ret = ds->ops->resume(ds);

	if (ret)
		return ret;

	/* Resume slave network devices */
	dsa_switch_for_each_port(dp, ds) {
		if (!dsa_port_is_initialized(dp))
			continue;

		ret = dsa_slave_resume(dp->slave);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dsa_switch_resume);
#endif

static struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};

static struct workqueue_struct *dsa_owq;

bool dsa_schedule_work(struct work_struct *work)
{
	return queue_work(dsa_owq, work);
}

void dsa_flush_workqueue(void)
{
	flush_workqueue(dsa_owq);
}
EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
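/* dsa_owq is allocated as an ordered workqueue (see dsa_init_module()), so
 * items queued through dsa_schedule_work() run one at a time, in queueing
 * order, and dsa_flush_workqueue() waits for everything queued so far.
 */

/* Usage sketch for the devlink wrappers below (hypothetical driver names,
 * for illustration only): a switch driver describes its parameters with
 * DSA_DEVLINK_PARAM_DRIVER(), which plugs dsa_devlink_param_get()/_set()
 * in as the devlink callbacks, and registers them against ds->devlink:
 *
 *	static const struct devlink_param example_params[] = {
 *		DSA_DEVLINK_PARAM_DRIVER(EXAMPLE_PARAM_ID_FOO,
 *					 "foo", DEVLINK_PARAM_TYPE_U8,
 *					 BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
 *	};
 *
 *	err = dsa_devlink_params_register(ds, example_params,
 *					  ARRAY_SIZE(example_params));
 *
 * The wrappers then dispatch to the driver's ds->ops->devlink_param_get()
 * and ds->ops->devlink_param_set() implementations.
 */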
int dsa_devlink_param_get(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_get(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_get);

int dsa_devlink_param_set(struct devlink *dl, u32 id,
			  struct devlink_param_gset_ctx *ctx)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_param_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_param_set(ds, id, ctx);
}
EXPORT_SYMBOL_GPL(dsa_devlink_param_set);

int dsa_devlink_params_register(struct dsa_switch *ds,
				const struct devlink_param *params,
				size_t params_count)
{
	return devlink_params_register(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_register);

void dsa_devlink_params_unregister(struct dsa_switch *ds,
				   const struct devlink_param *params,
				   size_t params_count)
{
	devlink_params_unregister(ds->devlink, params, params_count);
}
EXPORT_SYMBOL_GPL(dsa_devlink_params_unregister);

int dsa_devlink_resource_register(struct dsa_switch *ds,
				  const char *resource_name,
				  u64 resource_size,
				  u64 resource_id,
				  u64 parent_resource_id,
				  const struct devlink_resource_size_params *size_params)
{
	return devlink_resource_register(ds->devlink, resource_name,
					 resource_size, resource_id,
					 parent_resource_id,
					 size_params);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);

void dsa_devlink_resources_unregister(struct dsa_switch *ds)
{
	devlink_resources_unregister(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);

void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
					   u64 resource_id,
					   devlink_resource_occ_get_t *occ_get,
					   void *occ_get_priv)
{
	return devlink_resource_occ_get_register(ds->devlink, resource_id,
						 occ_get, occ_get_priv);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_register);

void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
					     u64 resource_id)
{
	devlink_resource_occ_get_unregister(ds->devlink, resource_id);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);

struct devlink_region *
dsa_devlink_region_create(struct dsa_switch *ds,
			  const struct devlink_region_ops *ops,
			  u32 region_max_snapshots, u64 region_size)
{
	return devlink_region_create(ds->devlink, ops, region_max_snapshots,
				     region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_create);

struct devlink_region *
dsa_devlink_port_region_create(struct dsa_switch *ds,
			       int port,
			       const struct devlink_port_region_ops *ops,
			       u32 region_max_snapshots, u64 region_size)
{
	struct dsa_port *dp = dsa_to_port(ds, port);

	return devlink_port_region_create(&dp->devlink_port, ops,
					  region_max_snapshots,
					  region_size);
}
EXPORT_SYMBOL_GPL(dsa_devlink_port_region_create);

void dsa_devlink_region_destroy(struct devlink_region *region)
{
	devlink_region_destroy(region);
}
EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);

struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
	if (!netdev || !dsa_slave_dev_check(netdev))
		return ERR_PTR(-ENODEV);

	return dsa_slave_to_port(netdev);
}
EXPORT_SYMBOL_GPL(dsa_port_from_netdev);
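/* struct dsa_db identifies the address database (per-port, per-LAG or
 * per-bridge) that an FDB/MDB entry was installed in. The helpers below
 * let drivers with FDB isolation detect that an address is also present
 * in a database other than the one currently being operated on.
 */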
bool dsa_db_equal(const struct dsa_db *a, const struct dsa_db *b)
{
	if (a->type != b->type)
		return false;

	switch (a->type) {
	case DSA_DB_PORT:
		return a->dp == b->dp;
	case DSA_DB_LAG:
		return a->lag.dev == b->lag.dev;
	case DSA_DB_BRIDGE:
		return a->bridge.num == b->bridge.num;
	default:
		WARN_ON(1);
		return false;
	}
}

bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const unsigned char *addr, u16 vid,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->fdbs, list) {
		if (!ether_addr_equal(a->addr, addr) || a->vid != vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_fdb_present_in_other_db);

bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct dsa_db db)
{
	struct dsa_port *dp = dsa_to_port(ds, port);
	struct dsa_mac_addr *a;

	lockdep_assert_held(&dp->addr_lists_lock);

	list_for_each_entry(a, &dp->mdbs, list) {
		if (!ether_addr_equal(a->addr, mdb->addr) || a->vid != mdb->vid)
			continue;

		if (a->db.type == db.type && !dsa_db_equal(&a->db, &db))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db);

static int __init dsa_init_module(void)
{
	int rc;

	dsa_owq = alloc_ordered_workqueue("dsa_ordered",
					  WQ_MEM_RECLAIM);
	if (!dsa_owq)
		return -ENOMEM;

	rc = dsa_slave_register_notifier();
	if (rc)
		goto register_notifier_fail;

	dev_add_pack(&dsa_pack_type);

	dsa_tag_driver_register(&DSA_TAG_DRIVER_NAME(none_ops),
				THIS_MODULE);

	return 0;

register_notifier_fail:
	destroy_workqueue(dsa_owq);

	return rc;
}
module_init(dsa_init_module);

static void __exit dsa_cleanup_module(void)
{
	dsa_tag_driver_unregister(&DSA_TAG_DRIVER_NAME(none_ops));

	dsa_slave_unregister_notifier();
	dev_remove_pack(&dsa_pack_type);
	destroy_workqueue(dsa_owq);
}
module_exit(dsa_cleanup_module);

MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:dsa");