usnic_ib_main.c (19800B)
/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Author: Upinder Malhi <umalhi@cisco.com>
 * Author: Anant Deepak <anadeepa@cisco.com>
 * Author: Cesare Cantu' <cantuc@cisco.com>
 * Author: Jeff Squyres <jsquyres@cisco.com>
 * Author: Kiran Thirumalai <kithirum@cisco.com>
 * Author: Xuyang Wang <xuywang@cisco.com>
 * Author: Reese Faucette <rfaucett@cisco.com>
 *
 */

#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_common_util.h"
#include "usnic_ib.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_log.h"
#include "usnic_fwd.h"
#include "usnic_debugfs.h"
#include "usnic_ib_verbs.h"
#include "usnic_transport.h"
#include "usnic_uiom.h"
#include "usnic_ib_sysfs.h"

unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
unsigned int usnic_ib_share_vf = 1;

static const char usnic_version[] =
	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
static LIST_HEAD(usnic_ib_ibdev_list);

/* Callback dump funcs */
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_vf *vf = obj;
	return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));
}
/* End callback dump funcs */

static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}

void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
	char *buf = kzalloc(1000, GFP_KERNEL);

	if (!buf)
		return;

	usnic_ib_dump_vf(vf, buf, 1000);
	usnic_dbg("%s\n", buf);

	kfree(buf);
}

/* Start of netdev section */
static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
	struct usnic_ib_ucontext *ctx;
	struct usnic_ib_qp_grp *qp_grp;
	enum ib_qp_state cur_state;
	int status;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
		list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
			cur_state = qp_grp->state;
			if (cur_state == IB_QPS_INIT ||
				cur_state == IB_QPS_RTR ||
				cur_state == IB_QPS_RTS) {
				status = usnic_ib_qp_grp_modify(qp_grp,
								IB_QPS_ERR,
								NULL);
				if (status) {
					usnic_err("Failed to transition qp grp %u from %s to %s\n",
						qp_grp->grp_id,
						usnic_ib_qp_grp_state_to_string
						(cur_state),
						usnic_ib_qp_grp_state_to_string
						(IB_QPS_ERR));
				}
			}
		}
	}
}

static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event)
{
	struct net_device *netdev;
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));

	mutex_lock(&us_ibdev->usdev_lock);
	netdev = us_ibdev->netdev;
	switch (event) {
	case NETDEV_REBOOT:
		usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_PORT_ERR;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		if (!us_ibdev->ufdev->link_up &&
				netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_up(us_ibdev->ufdev);
			usnic_info("Link UP on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			ib_event.event = IB_EVENT_PORT_ACTIVE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else if (us_ibdev->ufdev->link_up &&
				!netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_down(us_ibdev->ufdev);
			usnic_info("Link DOWN on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_PORT_ERR;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else {
			usnic_dbg("Ignoring %s on %s\n",
					netdev_cmd_to_name(event),
					dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
				sizeof(us_ibdev->ufdev->mac))) {
			usnic_dbg("Ignoring addr change on %s\n",
					dev_name(&us_ibdev->ib_dev.dev));
		} else {
			usnic_info(" %s old mac: %pM new mac: %pM\n",
					dev_name(&us_ibdev->ib_dev.dev),
					us_ibdev->ufdev->mac,
					netdev->dev_addr);
			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_GID_CHANGE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		}

		break;
	case NETDEV_CHANGEMTU:
		if (us_ibdev->ufdev->mtu != netdev->mtu) {
			usnic_info("MTU Change on %s old: %u new: %u\n",
					dev_name(&us_ibdev->ib_dev.dev),
					us_ibdev->ufdev->mtu, netdev->mtu);
			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		} else {
			usnic_dbg("Ignoring MTU change on %s\n",
					dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	default:
		usnic_dbg("Ignoring event %s on %s",
				netdev_cmd_to_name(event),
				dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);
}

static int usnic_ib_netdevice_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct ib_device *ibdev;

	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
	if (!ibdev)
		return NOTIFY_DONE;

	us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
	usnic_ib_handle_usdev_event(us_ibdev, event);
	ib_device_put(ibdev);
	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
/* End of netdev section */

/* Start of inet section */
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers",
				netdev_cmd_to_name(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4",
				netdev_cmd_to_name(event),
				&us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s",
				netdev_cmd_to_name(event),
				dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}

static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
					unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct in_ifaddr *ifa = ptr;
	struct net_device *netdev = ifa->ifa_dev->dev;
	struct ib_device *ibdev;

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
	if (!ibdev)
		return NOTIFY_DONE;

	us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
	usnic_ib_handle_inet_event(us_ibdev, event, ptr);
	ib_device_put(ibdev);
	return NOTIFY_DONE;
}
static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};
/* End of inet section*/

static int usnic_port_immutable(struct ib_device *ibdev, u32 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_USNIC;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static void usnic_get_dev_fw_str(struct ib_device *device, char *str)
{
	struct usnic_ib_dev *us_ibdev =
		container_of(device, struct usnic_ib_dev, ib_dev);
	struct ethtool_drvinfo info;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	mutex_unlock(&us_ibdev->usdev_lock);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
}

static const struct ib_device_ops usnic_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_USNIC,
	.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION,

	.alloc_pd = usnic_ib_alloc_pd,
	.alloc_ucontext = usnic_ib_alloc_ucontext,
	.create_cq = usnic_ib_create_cq,
	.create_qp = usnic_ib_create_qp,
	.dealloc_pd = usnic_ib_dealloc_pd,
	.dealloc_ucontext = usnic_ib_dealloc_ucontext,
	.dereg_mr = usnic_ib_dereg_mr,
	.destroy_cq = usnic_ib_destroy_cq,
	.destroy_qp = usnic_ib_destroy_qp,
	.device_group = &usnic_attr_group,
	.get_dev_fw_str = usnic_get_dev_fw_str,
	.get_link_layer = usnic_ib_port_link_layer,
	.get_port_immutable = usnic_port_immutable,
	.mmap = usnic_ib_mmap,
	.modify_qp = usnic_ib_modify_qp,
	.query_device = usnic_ib_query_device,
	.query_gid = usnic_ib_query_gid,
	.query_port = usnic_ib_query_port,
	.query_qp = usnic_ib_query_qp,
	.reg_user_mr = usnic_ib_reg_mr,
	INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_qp, usnic_ib_qp_grp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
};

/* Start of PF discovery section */
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;
	struct in_device *ind;
	struct net_device *netdev;
	int ret;

	usnic_dbg("\n");
	netdev = pci_get_drvdata(dev);

	us_ibdev = ib_alloc_device(usnic_ib_dev, ib_dev);
	if (!us_ibdev) {
		usnic_err("Device %s context alloc failed\n",
				netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(-EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (!us_ibdev->ufdev) {
		usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dev.parent = &dev->dev;

	ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);

	ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1);
	if (ret)
		goto err_fwd_dealloc;

	dma_set_max_seg_size(&dev->dev, SZ_2G);
	if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", &dev->dev))
		goto err_fwd_dealloc;

	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
	if (netif_carrier_ok(us_ibdev->netdev))
		usnic_fwd_carrier_up(us_ibdev->ufdev);

	rcu_read_lock();
	ind = __in_dev_get_rcu(netdev);
	if (ind) {
		const struct in_ifaddr *ifa;

		ifa = rcu_dereference(ind->ifa_list);
		if (ifa)
			usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
	}
	rcu_read_unlock();

	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
				us_ibdev->ufdev->inaddr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
		   dev_name(&us_ibdev->ib_dev.dev),
		   netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac,
		   us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocing device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}

static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev));
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}

static void usnic_ib_undiscover_pf(struct kref *kref)
{
	struct usnic_ib_dev *us_ibdev, *tmp;
	struct pci_dev *dev;
	bool found = false;

	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry_safe(us_ibdev, tmp,
				&usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == dev) {
			list_del(&us_ibdev->ib_dev_link);
			found = true;
			break;
		}
	}

	mutex_unlock(&usnic_ib_ibdev_list_lock);
	if (found)
		usnic_ib_device_remove(us_ibdev);
	else
		WARN(1, "Failed to remove PF %s\n", pci_name(dev));
}

static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}
/* End of PF discovery section */

/* Start of PCI section */

static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}
};

static int usnic_ib_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	if (!device_iommu_mapped(&pdev->dev)) {
		usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
				pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
				pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
				pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err%ld\n",
				pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	mutex_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save max settings (will be same for each VF, easier to re-write than
	 * to say "if (!set) { set_values(); set=1; }
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL+1;
			res_type < USNIC_VNIC_RES_TYPE_MAX;
			res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
								res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
			dev_name(&pf->ib_dev.dev));
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}

static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}

/* PCI driver entry points */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
/* End of PCI section */

/* Start of module section */
static int __init usnic_ib_init(void)
{
	int err;

	printk_once(KERN_INFO "%s", usnic_version);

	err = pci_register_driver(&usnic_ib_pci_driver);
	if (err) {
		usnic_err("Unable to register with PCI\n");
		goto out_umem_fini;
	}

	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
	if (err) {
		usnic_err("Failed to register netdev notifier\n");
		goto out_pci_unreg;
	}

	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	if (err) {
		usnic_err("Failed to register inet addr notifier\n");
		goto out_unreg_netdev_notifier;
	}

	err = usnic_transport_init();
	if (err) {
		usnic_err("Failed to initialize transport\n");
		goto out_unreg_inetaddr_notifier;
	}

	usnic_debugfs_init();

	return 0;

out_unreg_inetaddr_notifier:
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
out_unreg_netdev_notifier:
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
out_pci_unreg:
	pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:

	return err;
}

static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
}

MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);
/* End of module section */