bcmsdh.c (31655B)
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2010 Broadcom Corporation
 */
/* ****************** SDIO CARD Interface Functions **************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/acpi.h>
#include <net/cfg80211.h>

#include <defs.h>
#include <brcm_hw_ids.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <chipcommon.h>
#include <soc.h>
#include "chip.h"
#include "bus.h"
#include "debug.h"
#include "sdio.h"
#include "core.h"
#include "common.h"

#define SDIOH_API_ACCESS_RETRY_LIMIT	2

#define DMA_ALIGN_MASK			0x03

#define SDIO_FUNC1_BLOCKSIZE		64
#define SDIO_FUNC2_BLOCKSIZE		512
#define SDIO_4373_FUNC2_BLOCKSIZE	256
#define SDIO_435X_FUNC2_BLOCKSIZE	256
#define SDIO_4329_FUNC2_BLOCKSIZE	128
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY			3000

#define BRCMF_DEFAULT_RXGLOM_SIZE	32	/* max rx frames in glom chain */

struct brcmf_sdiod_freezer {
	atomic_t freezing;
	atomic_t thread_count;
	u32 frozen_count;
	wait_queue_head_t thread_freeze;
	struct completion resumed;
};

static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev_id);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "OOB intr triggered\n");

	/* out-of-band interrupt is level-triggered which won't
	 * be cleared until dpc
	 */
	if (sdiodev->irq_en) {
		disable_irq_nosync(irq);
		sdiodev->irq_en = false;
	}

	brcmf_sdio_isr(sdiodev->bus, true);

	return IRQ_HANDLED;
}

static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(&func->dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;

	brcmf_dbg(INTR, "IB intr triggered\n");

	brcmf_sdio_isr(sdiodev->bus, false);
}

/* dummy handler for SDIO function 2 interrupt */
static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func)
{
}

int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
{
	struct brcmfmac_sdio_pd *pdata;
	int ret = 0;
	u8 data;
	u32 addr, gpiocontrol;

	pdata = &sdiodev->settings->bus.sdio;
	if (pdata->oob_irq_supported) {
		brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
			  pdata->oob_irq_nr);
		spin_lock_init(&sdiodev->irq_en_lock);
		sdiodev->irq_en = true;

		ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
				  pdata->oob_irq_flags, "brcmf_oob_intr",
				  &sdiodev->func1->dev);
		if (ret != 0) {
			brcmf_err("request_irq failed %d\n", ret);
			return ret;
		}
		sdiodev->oob_irq_requested = true;

		ret = enable_irq_wake(pdata->oob_irq_nr);
		if (ret != 0) {
			brcmf_err("enable_irq_wake failed %d\n", ret);
			return ret;
		}
		disable_irq_wake(pdata->oob_irq_nr);

		sdio_claim_host(sdiodev->func1);

		if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
			/* assign GPIO to SDIO core */
			addr = brcmf_chip_enum_base(sdiodev->func1->device);
			addr = CORE_CC_REG(addr, gpiocontrol);
			gpiocontrol = brcmf_sdiod_readl(sdiodev, addr, &ret);
			gpiocontrol |= 0x2;
			brcmf_sdiod_writel(sdiodev, addr, gpiocontrol, &ret);

			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_SELECT,
					   0xf, &ret);
			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_OUT, 0, &ret);
			brcmf_sdiod_writeb(sdiodev, SBSDIO_GPIO_EN, 0x2, &ret);
		}

		/* must configure SDIO_CCCR_IENx to enable irq */
		data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
		data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 |
			SDIO_CCCR_IEN_FUNC0;
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);

		/* redirect, configure and enable io for interrupt signal */
		data = SDIO_CCCR_BRCM_SEPINT_MASK | SDIO_CCCR_BRCM_SEPINT_OE;
		if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH)
			data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
				     data, &ret);
		sdio_release_host(sdiodev->func1);
	} else {
		brcmf_dbg(SDIO, "Entering\n");
		sdio_claim_host(sdiodev->func1);
		sdio_claim_irq(sdiodev->func1, brcmf_sdiod_ib_irqhandler);
		sdio_claim_irq(sdiodev->func2, brcmf_sdiod_dummy_irqhandler);
		sdio_release_host(sdiodev->func1);
		sdiodev->sd_irq_requested = true;
	}

	return 0;
}

void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
{
	brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n",
		  sdiodev->oob_irq_requested,
		  sdiodev->sd_irq_requested);

	if (sdiodev->oob_irq_requested) {
		struct brcmfmac_sdio_pd *pdata;

		pdata = &sdiodev->settings->bus.sdio;
		sdio_claim_host(sdiodev->func1);
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
		brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
		sdio_release_host(sdiodev->func1);

		sdiodev->oob_irq_requested = false;
		free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
		sdiodev->irq_en = false;
		sdiodev->oob_irq_requested = false;
	}

	if (sdiodev->sd_irq_requested) {
		sdio_claim_host(sdiodev->func1);
		sdio_release_irq(sdiodev->func2);
		sdio_release_irq(sdiodev->func1);
		sdio_release_host(sdiodev->func1);
		sdiodev->sd_irq_requested = false;
	}
}

void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
			      enum brcmf_sdiod_state state)
{
	if (sdiodev->state == BRCMF_SDIOD_NOMEDIUM ||
	    state == sdiodev->state)
		return;

	brcmf_dbg(TRACE, "%d -> %d\n", sdiodev->state, state);
	switch (sdiodev->state) {
	case BRCMF_SDIOD_DATA:
		/* any other state means bus interface is down */
		brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
		break;
	case BRCMF_SDIOD_DOWN:
		/* transition from DOWN to DATA means bus interface is up */
		if (state == BRCMF_SDIOD_DATA)
			brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_UP);
		break;
	default:
		break;
	}
	sdiodev->state = state;
}
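
/* Register accesses on function 1 go through a sliding backplane window:
 * the SBSDIO_FUNC1_SBADDR{LOW,MID,HIGH} registers select which 32 KiB of
 * the chip's address space the window maps. The helper below programs that
 * window and caches it in sdiodev->sbwad so repeated accesses to the same
 * region skip the extra function-1 register writes.
 */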
static int brcmf_sdiod_set_backplane_window(struct brcmf_sdio_dev *sdiodev,
					    u32 addr)
{
	u32 v, bar0 = addr & SBSDIO_SBWINDOW_MASK;
	int err = 0, i;

	if (bar0 == sdiodev->sbwad)
		return 0;

	v = bar0 >> 8;

	for (i = 0 ; i < 3 && !err ; i++, v >>= 8)
		brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_SBADDRLOW + i,
				   v & 0xff, &err);

	if (!err)
		sdiodev->sbwad = bar0;

	return err;
}

u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
{
	u32 data = 0;
	int retval;

	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	data = sdio_readl(sdiodev->func1, addr, &retval);

out:
	if (ret)
		*ret = retval;

	return data;
}

void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
			u32 data, int *ret)
{
	int retval;

	retval = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (retval)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	sdio_writel(sdiodev->func1, data, addr, &retval);

out:
	if (ret)
		*ret = retval;
}

static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
				   struct sdio_func *func, u32 addr,
				   struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	/* Single skb use the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	switch (func->num) {
	case 1:
		err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
					 req_sz);
		break;
	case 2:
		err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
		break;
	default:
		/* bail out as things are really fishy here */
		WARN(1, "invalid sdio function number: %d\n", func->num);
		err = -ENOMEDIUM;
	}

	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

	return err;
}

static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
				    struct sdio_func *func, u32 addr,
				    struct sk_buff *skb)
{
	unsigned int req_sz;
	int err;

	/* Single skb use the standard mmc interface */
	req_sz = skb->len + 3;
	req_sz &= (uint)~3;

	err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);

	if (err == -ENOMEDIUM)
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);

	return err;
}
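
/* Submit one CMD53 block-mode request covering the scatterlist entries
 * accumulated so far by brcmf_sdiod_sglist_rw() and wait for it to finish.
 */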
static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
			  struct mmc_command *mc, int sg_cnt, int req_sz,
			  int func_blk_sz, u32 *addr,
			  struct brcmf_sdio_dev *sdiodev,
			  struct sdio_func *func, int write)
{
	int ret;

	md->sg_len = sg_cnt;
	md->blocks = req_sz / func_blk_sz;
	mc->arg |= (*addr & 0x1FFFF) << 9;	/* address */
	mc->arg |= md->blocks & 0x1FF;		/* block count */
	/* incrementing addr for function 1 */
	if (func->num == 1)
		*addr += req_sz;

	mmc_set_data_timeout(md, func->card);
	mmc_wait_for_req(func->card->host, mr);

	ret = mc->error ? mc->error : md->error;
	if (ret == -ENOMEDIUM) {
		brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
	} else if (ret != 0) {
		brcmf_err("CMD53 sg block %s failed %d\n",
			  write ? "write" : "read", ret);
		ret = -EIO;
	}

	return ret;
}

/**
 * brcmf_sdiod_sglist_rw - SDIO interface function for block data access
 * @sdiodev: brcmfmac sdio device
 * @func: SDIO function
 * @write: direction flag
 * @addr: dongle memory address as source/destination
 * @pktlist: skb buffer head pointer
 *
 * This function takes the responsibility as the interface function to MMC
 * stack for block data access. It assumes that the skb passed down by the
 * caller has already been padded and aligned.
 */
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
				 struct sdio_func *func,
				 bool write, u32 addr,
				 struct sk_buff_head *pktlist)
{
	unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
	unsigned int max_req_sz, src_offset, dst_offset;
	unsigned char *pkt_data, *orig_data, *dst_data;
	struct sk_buff_head local_list, *target_list;
	struct sk_buff *pkt_next = NULL, *src;
	unsigned short max_seg_cnt;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct scatterlist *sgl;
	int ret = 0;

	if (!pktlist->qlen)
		return -EINVAL;

	target_list = pktlist;
	/* for host with broken sg support, prepare a page aligned list */
	__skb_queue_head_init(&local_list);
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		req_sz = 0;
		skb_queue_walk(pktlist, pkt_next)
			req_sz += pkt_next->len;
		req_sz = ALIGN(req_sz, func->cur_blksize);
		while (req_sz > PAGE_SIZE) {
			pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
			if (pkt_next == NULL) {
				ret = -ENOMEM;
				goto exit;
			}
			__skb_queue_tail(&local_list, pkt_next);
			req_sz -= PAGE_SIZE;
		}
		pkt_next = brcmu_pkt_buf_get_skb(req_sz);
		if (pkt_next == NULL) {
			ret = -ENOMEM;
			goto exit;
		}
		__skb_queue_tail(&local_list, pkt_next);
		target_list = &local_list;
	}

	func_blk_sz = func->cur_blksize;
	max_req_sz = sdiodev->max_request_size;
	max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
			    target_list->qlen);

	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sdiodev->sgtable.sgl;
	mmc_dat.blksz = func_blk_sz;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1<<31 : 0;	/* write flag */
	mmc_cmd.arg |= (func->num & 0x7) << 28;	/* SDIO func num */
	mmc_cmd.arg |= 1 << 27;			/* block mode */
	/* for function 1 the addr will be incremented */
	mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

	req_sz = 0;
	sg_cnt = 0;
	sgl = sdiodev->sgtable.sgl;
	skb_queue_walk(target_list, pkt_next) {
		pkt_offset = 0;
		while (pkt_offset < pkt_next->len) {
			pkt_data = pkt_next->data + pkt_offset;
			sg_data_sz = pkt_next->len - pkt_offset;
			if (sg_data_sz > sdiodev->max_segment_size)
				sg_data_sz = sdiodev->max_segment_size;
			if (sg_data_sz > max_req_sz - req_sz)
				sg_data_sz = max_req_sz - req_sz;

			sg_set_buf(sgl, pkt_data, sg_data_sz);
			sg_cnt++;

			sgl = sg_next(sgl);
			req_sz += sg_data_sz;
			pkt_offset += sg_data_sz;
			if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
				ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
						     sg_cnt, req_sz, func_blk_sz,
						     &addr, sdiodev, func, write);
				if (ret)
					goto exit_queue_walk;
				req_sz = 0;
				sg_cnt = 0;
				sgl = sdiodev->sgtable.sgl;
			}
		}
	}
	if (sg_cnt)
		ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
				     sg_cnt, req_sz, func_blk_sz,
				     &addr, sdiodev, func, write);
exit_queue_walk:
	if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
		src = __skb_peek(&local_list);
		src_offset = 0;
		skb_queue_walk(pktlist, pkt_next) {
			dst_offset = 0;

			/* This is safe because we must have enough SKB data
			 * in the local list to cover everything in pktlist.
			 */
			while (1) {
				req_sz = pkt_next->len - dst_offset;
				if (req_sz > src->len - src_offset)
					req_sz = src->len - src_offset;

				orig_data = src->data + src_offset;
				dst_data = pkt_next->data + dst_offset;
				memcpy(dst_data, orig_data, req_sz);

				src_offset += req_sz;
				if (src_offset == src->len) {
					src_offset = 0;
					src = skb_peek_next(src, &local_list);
				}
				dst_offset += req_sz;
				if (dst_offset == pkt_next->len)
					break;
			}
		}
	}

exit:
	sg_init_table(sdiodev->sgtable.sgl, sdiodev->sgtable.orig_nents);
	while ((pkt_next = __skb_dequeue(&local_list)) != NULL)
		brcmu_pkt_buf_free_skb(pkt_next);

	return ret;
}

int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);
	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	err = brcmf_sdiod_recv_pkt(sdiodev, mypkt);
	if (!err)
		memcpy(buf, mypkt->data, nbytes);

	brcmu_pkt_buf_free_skb(mypkt);
	return err;
}

int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
	u32 addr = sdiodev->cc_core->base;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto done;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt);

done:
	return err;
}

int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
			   struct sk_buff_head *pktq, uint totlen)
{
	struct sk_buff *glom_skb = NULL;
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err = 0;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
		  addr, pktq->qlen);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto done;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1)
		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					      __skb_peek(pktq));
	else if (!sdiodev->sg_support) {
		glom_skb = brcmu_pkt_buf_get_skb(totlen);
		if (!glom_skb)
			return -ENOMEM;
		err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
					      glom_skb);
		if (err)
			goto done;

		skb_queue_walk(pktq, skb) {
			memcpy(skb->data, glom_skb->data, skb->len);
			skb_pull(glom_skb, skb->len);
		}
	} else
		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, false,
					    addr, pktq);

done:
	brcmu_pkt_buf_free_skb(glom_skb);
	return err;
}

int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
	struct sk_buff *mypkt;
	u32 addr = sdiodev->cc_core->base;
	int err;

	mypkt = brcmu_pkt_buf_get_skb(nbytes);

	if (!mypkt) {
		brcmf_err("brcmu_pkt_buf_get_skb failed: len %d\n",
			  nbytes);
		return -EIO;
	}

	memcpy(mypkt->data, buf, nbytes);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		goto out;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt);
out:
	brcmu_pkt_buf_free_skb(mypkt);

	return err;
}

int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
			 struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	u32 addr = sdiodev->cc_core->base;
	int err;

	brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);

	err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
	if (err)
		return err;

	addr &= SBSDIO_SB_OFT_ADDR_MASK;
	addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

	if (pktq->qlen == 1 || !sdiodev->sg_support) {
		skb_queue_walk(pktq, skb) {
			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2,
						       addr, skb);
			if (err)
				break;
		}
	} else {
		err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, true,
					    addr, pktq);
	}

	return err;
}

int
brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
		  u8 *data, uint size)
{
	int err = 0;
	struct sk_buff *pkt;
	u32 sdaddr;
	uint dsize;

	dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
	pkt = dev_alloc_skb(dsize);
	if (!pkt) {
		brcmf_err("dev_alloc_skb failed: len %d\n", dsize);
		return -EIO;
	}
	pkt->priority = 0;

	/* Determine initial transfer parameters */
	sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
	if ((sdaddr + size) & SBSDIO_SBWINDOW_MASK)
		dsize = (SBSDIO_SB_OFT_ADDR_LIMIT - sdaddr);
	else
		dsize = size;

	sdio_claim_host(sdiodev->func1);

	/* Do the transfer(s) */
	while (size) {
		/* Set the backplane window to include the start address */
		err = brcmf_sdiod_set_backplane_window(sdiodev, address);
		if (err)
			break;

		brcmf_dbg(SDIO, "%s %d bytes at offset 0x%08x in window 0x%08x\n",
			  write ? "write" : "read", dsize,
			  sdaddr, address & SBSDIO_SBWINDOW_MASK);

		sdaddr &= SBSDIO_SB_OFT_ADDR_MASK;
		sdaddr |= SBSDIO_SB_ACCESS_2_4B_FLAG;

		skb_put(pkt, dsize);

		if (write) {
			memcpy(pkt->data, data, dsize);
			err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func1,
						       sdaddr, pkt);
		} else {
			err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func1,
						      sdaddr, pkt);
		}

		if (err) {
			brcmf_err("membytes transfer failed\n");
			break;
		}
		if (!write)
			memcpy(data, pkt->data, dsize);
		skb_trim(pkt, 0);

		/* Adjust for next transfer (if any) */
		size -= dsize;
		if (size) {
			data += dsize;
			address += dsize;
			sdaddr = 0;
			dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
		}
	}

	dev_kfree_skb(pkt);

	sdio_release_host(sdiodev->func1);

	return err;
}

int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func)
{
	brcmf_dbg(SDIO, "Enter\n");

	/* Issue abort cmd52 command through F0 */
	brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);

	brcmf_dbg(SDIO, "Exit\n");
	return 0;
}

void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
{
	struct sdio_func *func;
	struct mmc_host *host;
	uint max_blocks;
	uint nents;
	int err;

	func = sdiodev->func2;
	host = func->card->host;
	sdiodev->sg_support = host->max_segs > 1;
	max_blocks = min_t(uint, host->max_blk_count, 511u);
	sdiodev->max_request_size = min_t(uint, host->max_req_size,
					  max_blocks * func->cur_blksize);
	sdiodev->max_segment_count = min_t(uint, host->max_segs,
					   SG_MAX_SINGLE_ALLOC);
	sdiodev->max_segment_size = host->max_seg_size;

	if (!sdiodev->sg_support)
		return;

	nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE,
		      sdiodev->settings->bus.sdio.txglomsz);
	nents += (nents >> 4) + 1;

	WARN_ON(nents > sdiodev->max_segment_count);

	brcmf_dbg(TRACE, "nents=%d\n", nents);
	err = sg_alloc_table(&sdiodev->sgtable, nents, GFP_KERNEL);
	if (err < 0) {
		brcmf_err("allocation failed: disable scatter-gather");
		sdiodev->sg_support = false;
	}

	sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz;
}

#ifdef CONFIG_PM_SLEEP
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->freezer = kzalloc(sizeof(*sdiodev->freezer), GFP_KERNEL);
	if (!sdiodev->freezer)
		return -ENOMEM;
	atomic_set(&sdiodev->freezer->thread_count, 0);
	atomic_set(&sdiodev->freezer->freezing, 0);
	init_waitqueue_head(&sdiodev->freezer->thread_freeze);
	init_completion(&sdiodev->freezer->resumed);
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
	if (sdiodev->freezer) {
		WARN_ON(atomic_read(&sdiodev->freezer->freezing));
		kfree(sdiodev->freezer);
	}
}

static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
{
	atomic_t *expect = &sdiodev->freezer->thread_count;
	int res = 0;

	sdiodev->freezer->frozen_count = 0;
	reinit_completion(&sdiodev->freezer->resumed);
	atomic_set(&sdiodev->freezer->freezing, 1);
	brcmf_sdio_trigger_dpc(sdiodev->bus);
	wait_event(sdiodev->freezer->thread_freeze,
		   atomic_read(expect) == sdiodev->freezer->frozen_count);
	sdio_claim_host(sdiodev->func1);
	res = brcmf_sdio_sleep(sdiodev->bus, true);
	sdio_release_host(sdiodev->func1);
	return res;
}

static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
	sdio_claim_host(sdiodev->func1);
	brcmf_sdio_sleep(sdiodev->bus, false);
	sdio_release_host(sdiodev->func1);
	atomic_set(&sdiodev->freezer->freezing, 0);
	complete_all(&sdiodev->freezer->resumed);
}

bool brcmf_sdiod_freezing(struct brcmf_sdio_dev *sdiodev)
{
	return atomic_read(&sdiodev->freezer->freezing);
}

void brcmf_sdiod_try_freeze(struct brcmf_sdio_dev *sdiodev)
{
	if (!brcmf_sdiod_freezing(sdiodev))
		return;
	sdiodev->freezer->frozen_count++;
	wake_up(&sdiodev->freezer->thread_freeze);
	wait_for_completion(&sdiodev->freezer->resumed);
}

void brcmf_sdiod_freezer_count(struct brcmf_sdio_dev *sdiodev)
{
	atomic_inc(&sdiodev->freezer->thread_count);
}

void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
{
	atomic_dec(&sdiodev->freezer->thread_count);
}
#else
static int brcmf_sdiod_freezer_attach(struct brcmf_sdio_dev *sdiodev)
{
	return 0;
}

static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
{
}
#endif /* CONFIG_PM_SLEEP */

int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
{
	sdiodev->state = BRCMF_SDIOD_DOWN;
	if (sdiodev->bus) {
		brcmf_sdio_remove(sdiodev->bus);
		sdiodev->bus = NULL;
	}

	brcmf_sdiod_freezer_detach(sdiodev);

	/* Disable Function 2 */
	sdio_claim_host(sdiodev->func2);
	sdio_disable_func(sdiodev->func2);
	sdio_release_host(sdiodev->func2);

	/* Disable Function 1 */
	sdio_claim_host(sdiodev->func1);
	sdio_disable_func(sdiodev->func1);
	sdio_release_host(sdiodev->func1);

	sg_free_table(&sdiodev->sgtable);
	sdiodev->sbwad = 0;

	pm_runtime_allow(sdiodev->func1->card->host->parent);
	return 0;
}

static void brcmf_sdiod_host_fixup(struct mmc_host *host)
{
	/* runtime-pm powers off the device */
	pm_runtime_forbid(host->parent);
	/* avoid removal detection upon resume */
	host->caps |= MMC_CAP_NONREMOVABLE;
}
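
/* Bring the card interface up: set the F1/F2 block sizes, enable function 1,
 * attach the suspend/resume freezer and hand over to the common SDIO bus
 * layer. Also called from resume when the device was powered off (no WOWL).
 */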
int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
	int ret = 0;
	unsigned int f2_blksz = SDIO_FUNC2_BLOCKSIZE;

	sdio_claim_host(sdiodev->func1);

	ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE);
	if (ret) {
		brcmf_err("Failed to set F1 blocksize\n");
		sdio_release_host(sdiodev->func1);
		goto out;
	}
	switch (sdiodev->func2->device) {
	case SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373:
		f2_blksz = SDIO_4373_FUNC2_BLOCKSIZE;
		break;
	case SDIO_DEVICE_ID_BROADCOM_4359:
	case SDIO_DEVICE_ID_BROADCOM_4354:
	case SDIO_DEVICE_ID_BROADCOM_4356:
		f2_blksz = SDIO_435X_FUNC2_BLOCKSIZE;
		break;
	case SDIO_DEVICE_ID_BROADCOM_4329:
		f2_blksz = SDIO_4329_FUNC2_BLOCKSIZE;
		break;
	default:
		break;
	}

	ret = sdio_set_block_size(sdiodev->func2, f2_blksz);
	if (ret) {
		brcmf_err("Failed to set F2 blocksize\n");
		sdio_release_host(sdiodev->func1);
		goto out;
	} else {
		brcmf_dbg(SDIO, "set F2 blocksize to %d\n", f2_blksz);
	}

	/* increase F2 timeout */
	sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY;

	/* Enable Function 1 */
	ret = sdio_enable_func(sdiodev->func1);
	sdio_release_host(sdiodev->func1);
	if (ret) {
		brcmf_err("Failed to enable F1: err=%d\n", ret);
		goto out;
	}

	ret = brcmf_sdiod_freezer_attach(sdiodev);
	if (ret)
		goto out;

	/* try to attach to the target device */
	sdiodev->bus = brcmf_sdio_probe(sdiodev);
	if (!sdiodev->bus) {
		ret = -ENODEV;
		goto out;
	}
	brcmf_sdiod_host_fixup(sdiodev->func2->card->host);
out:
	if (ret)
		brcmf_sdiod_remove(sdiodev);

	return ret;
}

#define BRCMF_SDIO_DEVICE(dev_id)	\
	{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, dev_id)}

/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43143),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43241),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4329),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4330),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4334),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4359),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752),
	BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359),
	{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);

static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev,
						  int val)
{
#if IS_ENABLED(CONFIG_ACPI)
	struct acpi_device *adev;

	adev = ACPI_COMPANION(dev);
	if (adev)
		adev->flags.power_manageable = 0;
#endif
}
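
/* The MMC core probes each SDIO function on the card separately. Function 1
 * is claimed here without further action; all driver state is allocated and
 * initialised when function 2 is probed.
 */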
static int brcmf_ops_sdio_probe(struct sdio_func *func,
				const struct sdio_device_id *id)
{
	int err;
	struct brcmf_sdio_dev *sdiodev;
	struct brcmf_bus *bus_if;
	struct device *dev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "Class=%x\n", func->class);
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function#: %d\n", func->num);

	dev = &func->dev;

	/* Set MMC_QUIRK_LENIENT_FN0 for this card */
	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;

	/* prohibit ACPI power management for this device */
	brcmf_sdiod_acpi_set_power_manageable(dev, 0);

	/* Consume func num 1 but don't do anything with it. */
	if (func->num == 1)
		return 0;

	/* Ignore anything but func 2 */
	if (func->num != 2)
		return -ENODEV;

	bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
	if (!bus_if)
		return -ENOMEM;
	sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
	if (!sdiodev) {
		kfree(bus_if);
		return -ENOMEM;
	}

	/* store refs to functions used. mmc_card does
	 * not hold the F0 function pointer.
	 */
	sdiodev->func1 = func->card->sdio_func[0];
	sdiodev->func2 = func;

	sdiodev->bus_if = bus_if;
	bus_if->bus_priv.sdio = sdiodev;
	bus_if->proto_type = BRCMF_PROTO_BCDC;
	dev_set_drvdata(&func->dev, bus_if);
	dev_set_drvdata(&sdiodev->func1->dev, bus_if);
	sdiodev->dev = &sdiodev->func1->dev;

	brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);

	brcmf_dbg(SDIO, "F2 found, calling brcmf_sdiod_probe...\n");
	err = brcmf_sdiod_probe(sdiodev);
	if (err) {
		brcmf_err("F2 error, probe failed %d...\n", err);
		goto fail;
	}

	brcmf_dbg(SDIO, "F2 init completed...\n");
	return 0;

fail:
	dev_set_drvdata(&func->dev, NULL);
	dev_set_drvdata(&sdiodev->func1->dev, NULL);
	kfree(sdiodev);
	kfree(bus_if);
	return err;
}

static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;

	brcmf_dbg(SDIO, "Enter\n");
	brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
	brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device);
	brcmf_dbg(SDIO, "Function: %d\n", func->num);

	bus_if = dev_get_drvdata(&func->dev);
	if (bus_if) {
		sdiodev = bus_if->bus_priv.sdio;

		/* start by unregistering irqs */
		brcmf_sdiod_intr_unregister(sdiodev);

		if (func->num != 1)
			return;

		/* only proceed with rest of cleanup if func 1 */
		brcmf_sdiod_remove(sdiodev);

		dev_set_drvdata(&sdiodev->func1->dev, NULL);
		dev_set_drvdata(&sdiodev->func2->dev, NULL);

		kfree(bus_if);
		kfree(sdiodev);
	}

	brcmf_dbg(SDIO, "Exit\n");
}

void brcmf_sdio_wowl_config(struct device *dev, bool enabled)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	mmc_pm_flag_t pm_caps = sdio_get_host_pm_caps(sdiodev->func1);

	/* Power must be preserved to be able to support WOWL. */
	if (!(pm_caps & MMC_PM_KEEP_POWER))
		goto notsup;

	if (sdiodev->settings->bus.sdio.oob_irq_supported ||
	    pm_caps & MMC_PM_WAKE_SDIO_IRQ) {
		sdiodev->wowl_enabled = enabled;
		brcmf_dbg(SDIO, "Configuring WOWL, enabled=%d\n", enabled);
		return;
	}

notsup:
	brcmf_dbg(SDIO, "WOWL not supported\n");
}

#ifdef CONFIG_PM_SLEEP
static int brcmf_ops_sdio_suspend(struct device *dev)
{
	struct sdio_func *func;
	struct brcmf_bus *bus_if;
	struct brcmf_sdio_dev *sdiodev;
	mmc_pm_flag_t sdio_flags;
	int ret = 0;

	func = container_of(dev, struct sdio_func, dev);
	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != 1)
		return 0;

	bus_if = dev_get_drvdata(dev);
	sdiodev = bus_if->bus_priv.sdio;

	if (sdiodev->wowl_enabled) {
		brcmf_sdiod_freezer_on(sdiodev);
		brcmf_sdio_wd_timer(sdiodev->bus, 0);

		sdio_flags = MMC_PM_KEEP_POWER;
		if (sdiodev->settings->bus.sdio.oob_irq_supported)
			enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
		else
			sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;

		if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
			brcmf_err("Failed to set pm_flags %x\n", sdio_flags);

	} else {
		/* power will be cut so remove device, probe again in resume */
		brcmf_sdiod_intr_unregister(sdiodev);
		ret = brcmf_sdiod_remove(sdiodev);
		if (ret)
			brcmf_err("Failed to remove device on suspend\n");
	}

	return ret;
}

static int brcmf_ops_sdio_resume(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
	struct sdio_func *func = container_of(dev, struct sdio_func, dev);
	int ret = 0;

	brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
	if (func->num != 2)
		return 0;

	if (!sdiodev->wowl_enabled) {
		/* bus was powered off and device removed, probe again */
		ret = brcmf_sdiod_probe(sdiodev);
		if (ret)
			brcmf_err("Failed to probe device on resume\n");
	} else {
		if (sdiodev->settings->bus.sdio.oob_irq_supported)
			disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);

		brcmf_sdiod_freezer_off(sdiodev);
	}

	return ret;
}

static const struct dev_pm_ops brcmf_sdio_pm_ops = {
	.suspend	= brcmf_ops_sdio_suspend,
	.resume		= brcmf_ops_sdio_resume,
};
#endif /* CONFIG_PM_SLEEP */

static struct sdio_driver brcmf_sdmmc_driver = {
	.probe = brcmf_ops_sdio_probe,
	.remove = brcmf_ops_sdio_remove,
	.name = KBUILD_MODNAME,
	.id_table = brcmf_sdmmc_ids,
	.drv = {
		.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
		.pm = &brcmf_sdio_pm_ops,
#endif /* CONFIG_PM_SLEEP */
		.coredump = brcmf_dev_coredump,
	},
};

int brcmf_sdio_register(void)
{
	return sdio_register_driver(&brcmf_sdmmc_driver);
}

void brcmf_sdio_exit(void)
{
	brcmf_dbg(SDIO, "Enter\n");

	sdio_unregister_driver(&brcmf_sdmmc_driver);
}