wmt-sdmmc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * WM8505/WM8650 SD/MMC Host Controller
 *
 * Copyright (C) 2010 Tony Prisk
 * Copyright (C) 2008 WonderMedia Technologies, Inc.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/interrupt.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/byteorder.h>


#define DRIVER_NAME "wmt-sdhc"


/* MMC/SD controller registers */
#define SDMMC_CTLR 0x00
#define SDMMC_CMD 0x01
#define SDMMC_RSPTYPE 0x02
#define SDMMC_ARG 0x04
#define SDMMC_BUSMODE 0x08
#define SDMMC_BLKLEN 0x0C
#define SDMMC_BLKCNT 0x0E
#define SDMMC_RSP 0x10
#define SDMMC_CBCR 0x20
#define SDMMC_INTMASK0 0x24
#define SDMMC_INTMASK1 0x25
#define SDMMC_STS0 0x28
#define SDMMC_STS1 0x29
#define SDMMC_STS2 0x2A
#define SDMMC_STS3 0x2B
#define SDMMC_RSPTIMEOUT 0x2C
#define SDMMC_CLK 0x30 /* VT8500 only */
#define SDMMC_EXTCTRL 0x34
#define SDMMC_SBLKLEN 0x38
#define SDMMC_DMATIMEOUT 0x3C


/* SDMMC_CTLR bit fields */
#define CTLR_CMD_START 0x01
#define CTLR_CMD_WRITE 0x04
#define CTLR_FIFO_RESET 0x08

/* SDMMC_BUSMODE bit fields */
#define BM_SPI_MODE 0x01
#define BM_FOURBIT_MODE 0x02
#define BM_EIGHTBIT_MODE 0x04
#define BM_SD_OFF 0x10
#define BM_SPI_CS 0x20
#define BM_SD_POWER 0x40
#define BM_SOFT_RESET 0x80

/* SDMMC_BLKLEN bit fields */
#define BLKL_CRCERR_ABORT 0x0800
#define BLKL_CD_POL_HIGH 0x1000
#define BLKL_GPI_CD 0x2000
#define BLKL_DATA3_CD 0x4000
#define BLKL_INT_ENABLE 0x8000

/* SDMMC_INTMASK0 bit fields */
#define INT0_MBLK_TRAN_DONE_INT_EN 0x10
#define INT0_BLK_TRAN_DONE_INT_EN 0x20
#define INT0_CD_INT_EN 0x40
#define INT0_DI_INT_EN 0x80

/* SDMMC_INTMASK1 bit fields */
#define INT1_CMD_RES_TRAN_DONE_INT_EN 0x02
#define INT1_CMD_RES_TOUT_INT_EN 0x04
#define INT1_MBLK_AUTO_STOP_INT_EN 0x08
#define INT1_DATA_TOUT_INT_EN 0x10
#define INT1_RESCRC_ERR_INT_EN 0x20
#define INT1_RCRC_ERR_INT_EN 0x40
#define INT1_WCRC_ERR_INT_EN 0x80

/* SDMMC_STS0 bit fields */
#define STS0_WRITE_PROTECT 0x02
#define STS0_CD_DATA3 0x04
#define STS0_CD_GPI 0x08
#define STS0_MBLK_DONE 0x10
#define STS0_BLK_DONE 0x20
#define STS0_CARD_DETECT 0x40
#define STS0_DEVICE_INS 0x80

/* SDMMC_STS1 bit fields */
#define STS1_SDIO_INT 0x01
#define STS1_CMDRSP_DONE 0x02
#define STS1_RSP_TIMEOUT 0x04
#define STS1_AUTOSTOP_DONE 0x08
#define STS1_DATA_TIMEOUT 0x10
#define STS1_RSP_CRC_ERR 0x20
#define STS1_RCRC_ERR 0x40
#define STS1_WCRC_ERR 0x80

/* SDMMC_STS2 bit fields */
#define STS2_CMD_RES_BUSY 0x10
#define STS2_DATARSP_BUSY 0x20
#define STS2_DIS_FORCECLK 0x80

/* SDMMC_EXTCTRL bit fields */
#define EXT_EIGHTBIT 0x04

/* MMC/SD DMA Controller Registers */
#define SDDMA_GCR 0x100
#define SDDMA_IER 0x104
#define SDDMA_ISR 0x108
#define SDDMA_DESPR 0x10C
#define SDDMA_RBR 0x110
#define SDDMA_DAR 0x114
#define SDDMA_BAR 0x118
#define SDDMA_CPR 0x11C
#define SDDMA_CCR 0x120


/* SDDMA_GCR bit fields */
#define DMA_GCR_DMA_EN 0x00000001
#define DMA_GCR_SOFT_RESET 0x00000100

/* SDDMA_IER bit fields */
#define DMA_IER_INT_EN 0x00000001

/* SDDMA_ISR bit fields */
#define DMA_ISR_INT_STS 0x00000001

/* SDDMA_RBR bit fields */
#define DMA_RBR_FORMAT 0x40000000
#define DMA_RBR_END 0x80000000

/* SDDMA_CCR bit fields */
#define DMA_CCR_RUN 0x00000080
#define DMA_CCR_IF_TO_PERIPHERAL 0x00000000
#define DMA_CCR_PERIPHERAL_TO_IF 0x00400000

/* SDDMA_CCR event status */
#define DMA_CCR_EVT_NO_STATUS 0x00000000
#define DMA_CCR_EVT_UNDERRUN 0x00000001
#define DMA_CCR_EVT_OVERRUN 0x00000002
#define DMA_CCR_EVT_DESP_READ 0x00000003
#define DMA_CCR_EVT_DATA_RW 0x00000004
#define DMA_CCR_EVT_EARLY_END 0x00000005
#define DMA_CCR_EVT_SUCCESS 0x0000000F

#define PDMA_READ 0x00
#define PDMA_WRITE 0x01

#define WMT_SD_POWER_OFF 0
#define WMT_SD_POWER_ON 1
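
/*
 * Layout of one PDMA descriptor as built by wmt_dma_init_descriptor()
 * below: bits [15:0] of 'flags' hold the requested byte count, bit 30
 * (DMA_RBR_FORMAT) is always set by this driver and bit 31 (DMA_RBR_END)
 * marks the last descriptor in the chain; 'branch_addr' holds the bus
 * address of the next descriptor.
 */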
struct wmt_dma_descriptor {
        u32 flags;
        u32 data_buffer_addr;
        u32 branch_addr;
        u32 reserved1;
};

struct wmt_mci_caps {
        unsigned int f_min;
        unsigned int f_max;
        u32 ocr_avail;
        u32 caps;
        u32 max_seg_size;
        u32 max_segs;
        u32 max_blk_size;
};

struct wmt_mci_priv {
        struct mmc_host *mmc;
        void __iomem *sdmmc_base;

        int irq_regular;
        int irq_dma;

        void *dma_desc_buffer;
        dma_addr_t dma_desc_device_addr;

        struct completion cmdcomp;
        struct completion datacomp;

        struct completion *comp_cmd;
        struct completion *comp_dma;

        struct mmc_request *req;
        struct mmc_command *cmd;

        struct clk *clk_sdmmc;
        struct device *dev;

        u8 power_inverted;
        u8 cd_inverted;
};

static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
{
        u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);

        if (enable ^ priv->power_inverted)
                reg_tmp &= ~BM_SD_OFF;
        else
                reg_tmp |= BM_SD_OFF;

        writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
}

static void wmt_mci_read_response(struct mmc_host *mmc)
{
        struct wmt_mci_priv *priv;
        int idx1, idx2;
        u8 tmp_resp;
        u32 response;

        priv = mmc_priv(mmc);

        for (idx1 = 0; idx1 < 4; idx1++) {
                response = 0;
                for (idx2 = 0; idx2 < 4; idx2++) {
                        if ((idx1 == 3) && (idx2 == 3))
                                tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP);
                        else
                                tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP +
                                                 (idx1*4) + idx2 + 1);
                        response |= (tmp_resp << (idx2 * 8));
                }
                priv->cmd->resp[idx1] = cpu_to_be32(response);
        }
}

static void wmt_mci_start_command(struct wmt_mci_priv *priv)
{
        u32 reg_tmp;

        reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
        writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR);
}

static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype,
                                u32 arg, u8 rsptype)
{
        struct wmt_mci_priv *priv;
        u32 reg_tmp;

        priv = mmc_priv(mmc);

        /* write command, arg, resptype registers */
        writeb(command, priv->sdmmc_base + SDMMC_CMD);
        writel(arg, priv->sdmmc_base + SDMMC_ARG);
        writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE);

        /* reset response FIFO */
        reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
        writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

        /* ensure clock enabled - VT3465 */
        wmt_set_sd_power(priv, WMT_SD_POWER_ON);

        /* clear status bits */
        writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
        writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
        writeb(0xFF, priv->sdmmc_base + SDMMC_STS2);
        writeb(0xFF, priv->sdmmc_base + SDMMC_STS3);

        /* set command type */
        reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
        writeb((reg_tmp & 0x0F) | (cmdtype << 4),
               priv->sdmmc_base + SDMMC_CTLR);

        return 0;
}

static void wmt_mci_disable_dma(struct wmt_mci_priv *priv)
{
        writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR);
        writel(0, priv->sdmmc_base + SDDMA_IER);
}

static void wmt_complete_data_request(struct wmt_mci_priv *priv)
{
        struct mmc_request *req;
        req = priv->req;

        req->data->bytes_xfered = req->data->blksz * req->data->blocks;

        /* unmap the DMA pages used for write data */
        if (req->data->flags & MMC_DATA_WRITE)
                dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
                             req->data->sg_len, DMA_TO_DEVICE);
        else
                dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
                             req->data->sg_len, DMA_FROM_DEVICE);

        /* Check if the DMA ISR returned a data error */
        if ((req->cmd->error) || (req->data->error))
                mmc_request_done(priv->mmc, req);
        else {
                wmt_mci_read_response(priv->mmc);
                if (!req->data->stop) {
                        /* single-block read/write requests end here */
                        mmc_request_done(priv->mmc, req);
                } else {
                        /*
                         * we change the priv->cmd variable so the response is
                         * stored in the stop struct rather than the original
                         * calling command struct
                         */
                        priv->comp_cmd = &priv->cmdcomp;
                        init_completion(priv->comp_cmd);
                        priv->cmd = req->data->stop;
                        wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
                                             7, req->data->stop->arg, 9);
                        wmt_mci_start_command(priv);
                }
        }
}

static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
{
        struct wmt_mci_priv *priv;

        int status;

        priv = (struct wmt_mci_priv *)data;

        status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;

        if (status != DMA_CCR_EVT_SUCCESS) {
                dev_err(priv->dev, "DMA Error: Status = %d\n", status);
                priv->req->data->error = -ETIMEDOUT;
                complete(priv->comp_dma);
                return IRQ_HANDLED;
        }

        priv->req->data->error = 0;

        wmt_mci_disable_dma(priv);

        complete(priv->comp_dma);

        if (priv->comp_cmd) {
                if (completion_done(priv->comp_cmd)) {
                        /*
                         * if the command (regular) interrupt has already
                         * completed, finish off the request; otherwise we wait
                         * for the command interrupt and finish from there.
                         */
                        wmt_complete_data_request(priv);
                }
        }

        return IRQ_HANDLED;
}

static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
{
        struct wmt_mci_priv *priv;
        u32 status0;
        u32 status1;
        u32 status2;
        u32 reg_tmp;
        int cmd_done;

        priv = (struct wmt_mci_priv *)data;
        cmd_done = 0;
        status0 = readb(priv->sdmmc_base + SDMMC_STS0);
        status1 = readb(priv->sdmmc_base + SDMMC_STS1);
        status2 = readb(priv->sdmmc_base + SDMMC_STS2);

        /* Check for card insertion */
        reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
        if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
                mmc_detect_change(priv->mmc, 0);
                if (priv->cmd)
                        priv->cmd->error = -ETIMEDOUT;
                if (priv->comp_cmd)
                        complete(priv->comp_cmd);
                if (priv->comp_dma) {
                        wmt_mci_disable_dma(priv);
                        complete(priv->comp_dma);
                }
                writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
                return IRQ_HANDLED;
        }

        if ((!priv->req->data) ||
            ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
                /* handle non-data & stop_transmission requests */
                if (status1 & STS1_CMDRSP_DONE) {
                        priv->cmd->error = 0;
                        cmd_done = 1;
                } else if ((status1 & STS1_RSP_TIMEOUT) ||
                           (status1 & STS1_DATA_TIMEOUT)) {
                        priv->cmd->error = -ETIMEDOUT;
                        cmd_done = 1;
                }

                if (cmd_done) {
                        priv->comp_cmd = NULL;

                        if (!priv->cmd->error)
                                wmt_mci_read_response(priv->mmc);

                        priv->cmd = NULL;

                        mmc_request_done(priv->mmc, priv->req);
                }
        } else {
                /* handle data requests */
                if (status1 & STS1_CMDRSP_DONE) {
                        if (priv->cmd)
                                priv->cmd->error = 0;
                        if (priv->comp_cmd)
                                complete(priv->comp_cmd);
                }

                if ((status1 & STS1_RSP_TIMEOUT) ||
                    (status1 & STS1_DATA_TIMEOUT)) {
                        if (priv->cmd)
                                priv->cmd->error = -ETIMEDOUT;
                        if (priv->comp_cmd)
                                complete(priv->comp_cmd);
                        if (priv->comp_dma) {
                                wmt_mci_disable_dma(priv);
                                complete(priv->comp_dma);
                        }
                }

                if (priv->comp_dma) {
                        /*
                         * If the dma interrupt has already completed, finish
                         * off the request; otherwise we wait for the DMA
                         * interrupt and finish from there.
                         */
                        if (completion_done(priv->comp_dma))
                                wmt_complete_data_request(priv);
                }
        }

        writeb(status0, priv->sdmmc_base + SDMMC_STS0);
        writeb(status1, priv->sdmmc_base + SDMMC_STS1);
        writeb(status2, priv->sdmmc_base + SDMMC_STS2);

        return IRQ_HANDLED;
}

static void wmt_reset_hardware(struct mmc_host *mmc)
{
        struct wmt_mci_priv *priv;
        u32 reg_tmp;

        priv = mmc_priv(mmc);

        /* reset controller */
        reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
        writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);

        /* reset response FIFO */
        reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
        writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

        /* enable GPI pin to detect card */
        writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);

        /* clear interrupt status */
        writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
        writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

        /* setup interrupts */
        writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
               SDMMC_INTMASK0);
        writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
               INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);

        /* set the DMA timeout */
        writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);

        /* auto clock freezing enable */
        reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
        writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);

        /* set a default clock speed of 400Khz */
        clk_set_rate(priv->clk_sdmmc, 400000);
}

static int wmt_dma_init(struct mmc_host *mmc)
{
        struct wmt_mci_priv *priv;

        priv = mmc_priv(mmc);

        writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
        writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
        if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0)
                return 0;
        else
                return 1;
}

static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
                u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
{
        desc->flags = 0x40000000 | req_count;
        if (end)
                desc->flags |= 0x80000000;
        desc->data_buffer_addr = buffer_addr;
        desc->branch_addr = branch_addr;
}

static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
{
        struct wmt_mci_priv *priv;
        u32 reg_tmp;

        priv = mmc_priv(mmc);

        /* Enable DMA Interrupts */
        writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);

        /* Write DMA Descriptor Pointer Register */
        writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);

        writel(0x00, priv->sdmmc_base + SDDMA_CCR);

        if (dir == PDMA_WRITE) {
                reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
                writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
                       SDDMA_CCR);
        } else {
                reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
                writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
                       SDDMA_CCR);
        }
}

static void wmt_dma_start(struct wmt_mci_priv *priv)
{
        u32 reg_tmp;

        reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
        writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
}
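
/*
 * Request path: commands without data complete entirely from
 * wmt_mci_regular_isr(). For data commands, one PDMA descriptor is built
 * per block and chained through branch_addr; the request then waits on
 * both the command and DMA completions, and whichever interrupt handler
 * sees the other completion already done calls
 * wmt_complete_data_request().
 */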
static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
{
        struct wmt_mci_priv *priv;
        struct wmt_dma_descriptor *desc;
        u8 command;
        u8 cmdtype;
        u32 arg;
        u8 rsptype;
        u32 reg_tmp;

        struct scatterlist *sg;
        int i;
        int sg_cnt;
        int offset;
        u32 dma_address;
        int desc_cnt;

        priv = mmc_priv(mmc);
        priv->req = req;

        /*
         * Use the cmd variable to pass a pointer to the resp[] structure
         * This is required on multi-block requests to pass the pointer to the
         * stop command
         */
        priv->cmd = req->cmd;

        command = req->cmd->opcode;
        arg = req->cmd->arg;
        rsptype = mmc_resp_type(req->cmd);
        cmdtype = 0;

        /* rsptype=7 only valid for SPI commands - should be =2 for SD */
        if (rsptype == 7)
                rsptype = 2;
        /* rsptype=21 is R1B, convert for controller */
        if (rsptype == 21)
                rsptype = 9;

        if (!req->data) {
                wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
                wmt_mci_start_command(priv);
                /* completion is now handled in the regular_isr() */
        }
        if (req->data) {
                priv->comp_cmd = &priv->cmdcomp;
                init_completion(priv->comp_cmd);

                wmt_dma_init(mmc);

                /* set controller data length */
                reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
                writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
                       priv->sdmmc_base + SDMMC_BLKLEN);

                /* set controller block count */
                writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);

                desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;

                if (req->data->flags & MMC_DATA_WRITE) {
                        sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
                                            req->data->sg_len, DMA_TO_DEVICE);
                        cmdtype = 1;
                        if (req->data->blocks > 1)
                                cmdtype = 3;
                } else {
                        sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
                                            req->data->sg_len, DMA_FROM_DEVICE);
                        cmdtype = 2;
                        if (req->data->blocks > 1)
                                cmdtype = 4;
                }

                dma_address = priv->dma_desc_device_addr + 16;
                desc_cnt = 0;

                for_each_sg(req->data->sg, sg, sg_cnt, i) {
                        offset = 0;
                        while (offset < sg_dma_len(sg)) {
                                wmt_dma_init_descriptor(desc, req->data->blksz,
                                                sg_dma_address(sg)+offset,
                                                dma_address, 0);
                                desc++;
                                desc_cnt++;
                                offset += req->data->blksz;
                                dma_address += 16;
                                if (desc_cnt == req->data->blocks)
                                        break;
                        }
                }
                desc--;
                desc->flags |= 0x80000000;

                if (req->data->flags & MMC_DATA_WRITE)
                        wmt_dma_config(mmc, priv->dma_desc_device_addr,
                                       PDMA_WRITE);
                else
                        wmt_dma_config(mmc, priv->dma_desc_device_addr,
                                       PDMA_READ);

                wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);

                priv->comp_dma = &priv->datacomp;
                init_completion(priv->comp_dma);

                wmt_dma_start(priv);
                wmt_mci_start_command(priv);
        }
}

static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct wmt_mci_priv *priv;
        u32 busmode, extctrl;

        priv = mmc_priv(mmc);

        if (ios->power_mode == MMC_POWER_UP) {
                wmt_reset_hardware(mmc);

                wmt_set_sd_power(priv, WMT_SD_POWER_ON);
        }
        if (ios->power_mode == MMC_POWER_OFF)
                wmt_set_sd_power(priv, WMT_SD_POWER_OFF);

        if (ios->clock != 0)
                clk_set_rate(priv->clk_sdmmc, ios->clock);

        busmode = readb(priv->sdmmc_base + SDMMC_BUSMODE);
        extctrl = readb(priv->sdmmc_base + SDMMC_EXTCTRL);

        busmode &= ~(BM_EIGHTBIT_MODE | BM_FOURBIT_MODE);
        extctrl &= ~EXT_EIGHTBIT;

        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_8:
                busmode |= BM_EIGHTBIT_MODE;
                extctrl |= EXT_EIGHTBIT;
                break;
        case MMC_BUS_WIDTH_4:
                busmode |= BM_FOURBIT_MODE;
                break;
        case MMC_BUS_WIDTH_1:
                break;
        }

        writeb(busmode, priv->sdmmc_base + SDMMC_BUSMODE);
        writeb(extctrl, priv->sdmmc_base + SDMMC_EXTCTRL);
}
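
/*
 * get_ro() reports read-only when STS0_WRITE_PROTECT reads back clear;
 * get_cd() derives card presence from the STS0_CD_GPI status bit,
 * optionally inverted via the "cd-inverted" devicetree property.
 */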
static int wmt_mci_get_ro(struct mmc_host *mmc)
{
        struct wmt_mci_priv *priv = mmc_priv(mmc);

        return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
}

static int wmt_mci_get_cd(struct mmc_host *mmc)
{
        struct wmt_mci_priv *priv = mmc_priv(mmc);
        u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;

        return !(cd ^ priv->cd_inverted);
}

static const struct mmc_host_ops wmt_mci_ops = {
        .request = wmt_mci_request,
        .set_ios = wmt_mci_set_ios,
        .get_ro = wmt_mci_get_ro,
        .get_cd = wmt_mci_get_cd,
};

/* Controller capabilities */
static struct wmt_mci_caps wm8505_caps = {
        .f_min = 390425,
        .f_max = 50000000,
        .ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
        .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
                MMC_CAP_SD_HIGHSPEED,
        .max_seg_size = 65024,
        .max_segs = 128,
        .max_blk_size = 2048,
};

static const struct of_device_id wmt_mci_dt_ids[] = {
        { .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
        { /* Sentinel */ },
};

static int wmt_mci_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        struct wmt_mci_priv *priv;
        struct device_node *np = pdev->dev.of_node;
        const struct wmt_mci_caps *wmt_caps;
        int ret;
        int regular_irq, dma_irq;

        wmt_caps = of_device_get_match_data(&pdev->dev);
        if (!wmt_caps) {
                dev_err(&pdev->dev, "Controller capabilities data missing\n");
                return -EFAULT;
        }

        if (!np) {
                dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
                return -EFAULT;
        }

        regular_irq = irq_of_parse_and_map(np, 0);
        dma_irq = irq_of_parse_and_map(np, 1);

        if (!regular_irq || !dma_irq) {
                dev_err(&pdev->dev, "Getting IRQs failed!\n");
                ret = -ENXIO;
                goto fail1;
        }

        mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
        if (!mmc) {
                dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
                ret = -ENOMEM;
                goto fail1;
        }

        mmc->ops = &wmt_mci_ops;
        mmc->f_min = wmt_caps->f_min;
        mmc->f_max = wmt_caps->f_max;
        mmc->ocr_avail = wmt_caps->ocr_avail;
        mmc->caps = wmt_caps->caps;

        mmc->max_seg_size = wmt_caps->max_seg_size;
        mmc->max_segs = wmt_caps->max_segs;
        mmc->max_blk_size = wmt_caps->max_blk_size;

        mmc->max_req_size = (16*512*mmc->max_segs);
        mmc->max_blk_count = mmc->max_req_size / 512;

        priv = mmc_priv(mmc);
        priv->mmc = mmc;
        priv->dev = &pdev->dev;

        priv->power_inverted = 0;
        priv->cd_inverted = 0;

        if (of_get_property(np, "sdon-inverted", NULL))
                priv->power_inverted = 1;
        if (of_get_property(np, "cd-inverted", NULL))
                priv->cd_inverted = 1;

        priv->sdmmc_base = of_iomap(np, 0);
        if (!priv->sdmmc_base) {
                dev_err(&pdev->dev, "Failed to map IO space\n");
                ret = -ENOMEM;
                goto fail2;
        }

        priv->irq_regular = regular_irq;
        priv->irq_dma = dma_irq;

        ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv);
        if (ret) {
                dev_err(&pdev->dev, "Register regular IRQ fail\n");
                goto fail3;
        }

        ret = request_irq(dma_irq, wmt_mci_dma_isr, 0, "sdmmc", priv);
        if (ret) {
                dev_err(&pdev->dev, "Register DMA IRQ fail\n");
                goto fail4;
        }

        /* alloc some DMA buffers for descriptors/transfers */
        priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
                                                   mmc->max_blk_count * 16,
                                                   &priv->dma_desc_device_addr,
                                                   GFP_KERNEL);
        if (!priv->dma_desc_buffer) {
                dev_err(&pdev->dev, "DMA alloc fail\n");
                ret = -EPERM;
                goto fail5;
        }

        platform_set_drvdata(pdev, mmc);

        priv->clk_sdmmc = of_clk_get(np, 0);
        if (IS_ERR(priv->clk_sdmmc)) {
                dev_err(&pdev->dev, "Error getting clock\n");
                ret = PTR_ERR(priv->clk_sdmmc);
                goto fail5;
        }

        ret = clk_prepare_enable(priv->clk_sdmmc);
        if (ret)
                goto fail6;

        /* configure the controller to a known 'ready' state */
        wmt_reset_hardware(mmc);

        mmc_add_host(mmc);

        dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");

        return 0;
fail6:
        clk_put(priv->clk_sdmmc);
fail5:
        free_irq(dma_irq, priv);
fail4:
        free_irq(regular_irq, priv);
fail3:
        iounmap(priv->sdmmc_base);
fail2:
        mmc_free_host(mmc);
fail1:
        return ret;
}

static int wmt_mci_remove(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        struct wmt_mci_priv *priv;
        struct resource *res;
        u32 reg_tmp;

        mmc = platform_get_drvdata(pdev);
        priv = mmc_priv(mmc);

        /* reset SD controller */
        reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
        writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
        reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
        writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN);
        writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
        writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

        /* release the dma buffers */
        dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16,
                          priv->dma_desc_buffer, priv->dma_desc_device_addr);

        mmc_remove_host(mmc);

        free_irq(priv->irq_regular, priv);
        free_irq(priv->irq_dma, priv);

        iounmap(priv->sdmmc_base);

        clk_disable_unprepare(priv->clk_sdmmc);
        clk_put(priv->clk_sdmmc);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));

        mmc_free_host(mmc);

        dev_info(&pdev->dev, "WMT MCI device removed\n");

        return 0;
}

#ifdef CONFIG_PM
static int wmt_mci_suspend(struct device *dev)
{
        u32 reg_tmp;
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct wmt_mci_priv *priv;

        if (!mmc)
                return 0;

        priv = mmc_priv(mmc);
        reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
        writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
               SDMMC_BUSMODE);

        reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
        writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);

        writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
        writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

        clk_disable(priv->clk_sdmmc);
        return 0;
}

static int wmt_mci_resume(struct device *dev)
{
        u32 reg_tmp;
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct wmt_mci_priv *priv;

        if (mmc) {
                priv = mmc_priv(mmc);
                clk_enable(priv->clk_sdmmc);

                reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
                writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
                       SDMMC_BUSMODE);

                reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
                writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
                       priv->sdmmc_base + SDMMC_BLKLEN);

                reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
                writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
                       SDMMC_INTMASK0);

        }

        return 0;
}

static const struct dev_pm_ops wmt_mci_pm = {
        .suspend = wmt_mci_suspend,
        .resume = wmt_mci_resume,
};

#define wmt_mci_pm_ops (&wmt_mci_pm)

#else /* !CONFIG_PM */

#define wmt_mci_pm_ops NULL

#endif

static struct platform_driver wmt_mci_driver = {
        .probe = wmt_mci_probe,
        .remove = wmt_mci_remove,
        .driver = {
                .name = DRIVER_NAME,
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
                .pm = wmt_mci_pm_ops,
                .of_match_table = wmt_mci_dt_ids,
        },
};

module_platform_driver(wmt_mci_driver);

MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
MODULE_AUTHOR("Tony Prisk");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);