spi-mtk-snfi.c (39646B)
// SPDX-License-Identifier: GPL-2.0
//
// Driver for the SPI-NAND mode of Mediatek NAND Flash Interface
//
// Copyright (c) 2022 Chuanhong Guo <gch981213@gmail.com>
//
// This driver is based on the SPI-NAND mtd driver from Mediatek SDK:
//
// Copyright (C) 2020 MediaTek Inc.
// Author: Weijie Gao <weijie.gao@mediatek.com>
//
// This controller organizes the page data as several interleaved sectors
// like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
// +---------+------+------+---------+------+------+-----+
// | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... |
// +---------+------+------+---------+------+------+-----+
// With auto-format turned on, DMA only returns this part:
// +---------+---------+-----+
// | Sector1 | Sector2 | ... |
// +---------+---------+-----+
// The FDM data will be filled to the registers, and ECC parity data isn't
// accessible.
// With auto-format off, all ((Sector+FDM+ECC)*nsectors) will be read over DMA
// in its original order shown in the first table. ECC can't be turned on when
// auto-format is off.
//
// However, the Linux SPI-NAND driver expects the data to be returned as:
// +------+-----+
// | Page | OOB |
// +------+-----+
// where the page data is continuously stored instead of interleaved.
// So we assume all instructions matching the page_op template between ECC
// prepare_io_req and finish_io_req are for page cache r/w.
// Here's how this spi-mem driver operates when reading:
// 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
// 2. Perform page ops and let the controller fill the DMA bounce buffer with
//    de-interleaved sector data and set FDM registers.
// 3. Return the data as:
//    +---------+---------+-----+------+------+-----+
//    | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
//    +---------+---------+-----+------+------+-----+
// 4. For other matching spi_mem ops outside a prepare/finish_io_req pair,
//    read the data with auto-format off into the bounce buffer and copy
//    needed data to the buffer specified in the request.
//
// Write requests operate in a similar manner.
// As a limitation of this strategy, we won't be able to access any ECC parity
// data at all in Linux.
//
// Here's the bad block mark situation on MTK chips:
// In older chips like mt7622, MTK uses the first FDM byte in the first sector
// as the bad block mark. After de-interleaving, this byte appears at [pagesize]
// in the returned data, which is the BBM position expected by the kernel.
// However, the conventional bad block mark is the first byte of the OOB, which
// is part of the last sector data in the interleaved layout. Instead of fixing
// their hardware, MTK decided to address this inconsistency in software. On
// later chips, the BootROM expects the following:
// 1. The [pagesize] byte on a nand page is used as BBM, which will appear at
//    (page_size - (nsectors - 1) * spare_size) in the DMA buffer.
// 2. The original byte stored at that position in the DMA buffer will be stored
//    as the first byte of the FDM section in the last sector.
// We can't disagree with the BootROM, so after de-interleaving, we need to
// perform the following swaps in read:
// 1. Store the BBM at [page_size - (nsectors - 1) * spare_size] to [page_size],
//    which is the BBM position expected by the kernel.
// 2. Store the page data byte at [page_size + (nsectors - 1) * fdm_size] back
//    to [page_size - (nsectors - 1) * spare_size].
// Similarly, when writing, we need to perform swaps in the other direction.
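//
// For illustration only (an example geometry, not a value taken from this
// driver): assume page_size = 2048, oob_size = 64, sector_size = 512, so
// nsectors = 4, spare_size = 16 and fdm_size = 8. The offsets involved in the
// swaps above would then be:
//   page_size - (nsectors - 1) * spare_size = 2048 - 3 * 16 = 2000
//   page_size                               = 2048
//   page_size + (nsectors - 1) * fdm_size   = 2048 + 3 * 8  = 2072
// i.e. on read the BBM found at offset 2000 ends up at offset 2048, and the
// page data byte parked in the last sector's FDM (offset 2072) goes back to
// offset 2000.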

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/of_platform.h>
#include <linux/mtd/nand-ecc-mtk.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/mtd/nand.h>

// NFI registers
#define NFI_CNFG 0x000
#define CNFG_OP_MODE_S 12
#define CNFG_OP_MODE_CUST 6
#define CNFG_OP_MODE_PROGRAM 3
#define CNFG_AUTO_FMT_EN BIT(9)
#define CNFG_HW_ECC_EN BIT(8)
#define CNFG_DMA_BURST_EN BIT(2)
#define CNFG_READ_MODE BIT(1)
#define CNFG_DMA_MODE BIT(0)

#define NFI_PAGEFMT 0x0004
#define NFI_SPARE_SIZE_LS_S 16
#define NFI_FDM_ECC_NUM_S 12
#define NFI_FDM_NUM_S 8
#define NFI_SPARE_SIZE_S 4
#define NFI_SEC_SEL_512 BIT(2)
#define NFI_PAGE_SIZE_S 0
#define NFI_PAGE_SIZE_512_2K 0
#define NFI_PAGE_SIZE_2K_4K 1
#define NFI_PAGE_SIZE_4K_8K 2
#define NFI_PAGE_SIZE_8K_16K 3

#define NFI_CON 0x008
#define CON_SEC_NUM_S 12
#define CON_BWR BIT(9)
#define CON_BRD BIT(8)
#define CON_NFI_RST BIT(1)
#define CON_FIFO_FLUSH BIT(0)

#define NFI_INTR_EN 0x010
#define NFI_INTR_STA 0x014
#define NFI_IRQ_INTR_EN BIT(31)
#define NFI_IRQ_CUS_READ BIT(8)
#define NFI_IRQ_CUS_PG BIT(7)

#define NFI_CMD 0x020
#define NFI_CMD_DUMMY_READ 0x00
#define NFI_CMD_DUMMY_WRITE 0x80

#define NFI_STRDATA 0x040
#define STR_DATA BIT(0)

#define NFI_STA 0x060
#define NFI_NAND_FSM GENMASK(28, 24)
#define NFI_FSM GENMASK(19, 16)
#define READ_EMPTY BIT(12)

#define NFI_FIFOSTA 0x064
#define FIFO_WR_REMAIN_S 8
#define FIFO_RD_REMAIN_S 0

#define NFI_ADDRCNTR 0x070
#define SEC_CNTR GENMASK(16, 12)
#define SEC_CNTR_S 12
#define NFI_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)

#define NFI_STRADDR 0x080

#define NFI_BYTELEN 0x084
#define BUS_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S)

#define NFI_FDM0L 0x0a0
#define NFI_FDM0M 0x0a4
#define NFI_FDML(n) (NFI_FDM0L + (n)*8)
#define NFI_FDMM(n) (NFI_FDM0M + (n)*8)

#define NFI_DEBUG_CON1 0x220
#define WBUF_EN BIT(2)

#define NFI_MASTERSTA 0x224
#define MAS_ADDR GENMASK(11, 9)
#define MAS_RD GENMASK(8, 6)
#define MAS_WR GENMASK(5, 3)
#define MAS_RDDLY GENMASK(2, 0)
#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)

// SNFI registers
#define SNF_MAC_CTL 0x500
#define MAC_XIO_SEL BIT(4)
#define SF_MAC_EN BIT(3)
#define SF_TRIG BIT(2)
#define WIP_READY BIT(1)
#define WIP BIT(0)

#define SNF_MAC_OUTL 0x504
#define SNF_MAC_INL 0x508

#define SNF_RD_CTL2 0x510
#define DATA_READ_DUMMY_S 8
#define DATA_READ_MAX_DUMMY 0xf
#define DATA_READ_CMD_S 0

#define SNF_RD_CTL3 0x514

#define SNF_PG_CTL1 0x524
#define PG_LOAD_CMD_S 8

#define SNF_PG_CTL2 0x528

#define SNF_MISC_CTL 0x538
#define SW_RST BIT(28)
#define FIFO_RD_LTC_S 25
#define PG_LOAD_X4_EN BIT(20)
#define DATA_READ_MODE_S 16
#define DATA_READ_MODE GENMASK(18, 16)
#define DATA_READ_MODE_X1 0
#define DATA_READ_MODE_X2 1
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0

#define SNF_MISC_CTL2 0x53c
#define PROGRAM_LOAD_BYTE_NUM_S 16
#define READ_DATA_BYTE_NUM_S 11

#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0

#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
#define CUS_READ_DONE BIT(27)
#define SPI_STATE_S 0
#define SPI_STATE GENMASK(3, 0)

#define SNF_CFG 0x55c
#define SPI_MODE BIT(0)

#define SNF_GPRAM 0x800
#define SNF_GPRAM_SIZE 0xa0

#define SNFI_POLL_INTERVAL 1000000

static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 };

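// Per-SoC NFI/SNFI capabilities: sector_size, fdm_size and fdm_ecc_size
// describe the hardware page layout, spare_sizes lists the selectable
// per-sector spare sizes for NFI_PAGEFMT, and bbm_swap selects the BootROM
// bad block mark convention described at the top of this file.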
struct mtk_snand_caps {
	u16 sector_size;
	u16 max_sectors;
	u16 fdm_size;
	u16 fdm_ecc_size;
	u16 fifo_size;

	bool bbm_swap;
	bool empty_page_check;
	u32 mastersta_mask;

	const u8 *spare_sizes;
	u32 num_spare_size;
};

static const struct mtk_snand_caps mt7622_snand_caps = {
	.sector_size = 512,
	.max_sectors = 8,
	.fdm_size = 8,
	.fdm_ecc_size = 1,
	.fifo_size = 32,
	.bbm_swap = false,
	.empty_page_check = false,
	.mastersta_mask = NFI_MASTERSTA_MASK_7622,
	.spare_sizes = mt7622_spare_sizes,
	.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};

static const struct mtk_snand_caps mt7629_snand_caps = {
	.sector_size = 512,
	.max_sectors = 8,
	.fdm_size = 8,
	.fdm_ecc_size = 1,
	.fifo_size = 32,
	.bbm_swap = true,
	.empty_page_check = false,
	.mastersta_mask = NFI_MASTERSTA_MASK_7622,
	.spare_sizes = mt7622_spare_sizes,
	.num_spare_size = ARRAY_SIZE(mt7622_spare_sizes)
};

struct mtk_snand_conf {
	size_t page_size;
	size_t oob_size;
	u8 nsectors;
	u8 spare_size;
};

struct mtk_snand {
	struct spi_controller *ctlr;
	struct device *dev;
	struct clk *nfi_clk;
	struct clk *pad_clk;
	void __iomem *nfi_base;
	int irq;
	struct completion op_done;
	const struct mtk_snand_caps *caps;
	struct mtk_ecc_config *ecc_cfg;
	struct mtk_ecc *ecc;
	struct mtk_snand_conf nfi_cfg;
	struct mtk_ecc_stats ecc_stats;
	struct nand_ecc_engine ecc_eng;
	bool autofmt;
	u8 *buf;
	size_t buf_len;
};

static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand)
{
	struct nand_ecc_engine *eng = nand->ecc.engine;

	return container_of(eng, struct mtk_snand, ecc_eng);
}

static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
{
	if (snf->buf_len >= size)
		return 0;
	kfree(snf->buf);
	snf->buf = kmalloc(size, GFP_KERNEL);
	if (!snf->buf)
		return -ENOMEM;
	snf->buf_len = size;
	memset(snf->buf, 0xff, snf->buf_len);
	return 0;
}

static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg)
{
	return readl(snf->nfi_base + reg);
}

static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val)
{
	writel(val, snf->nfi_base + reg);
}

static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val)
{
	writew(val, snf->nfi_base + reg);
}

static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
{
	u32 val;

	val = readl(snf->nfi_base + reg);
	val &= ~clr;
	val |= set;
	writel(val, snf->nfi_base + reg);
}

static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
{
	u32 i, val = 0, es = sizeof(u32);

	for (i = reg; i < reg + len; i++) {
		if (i == reg || i % es == 0)
			val = nfi_read32(snf, i & ~(es - 1));

		*data++ = (u8)(val >> (8 * (i % es)));
	}
}

static int mtk_nfi_reset(struct mtk_snand *snf)
{
	u32 val, fifo_mask;
	int ret;

	nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);

	ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				 !(val & snf->caps->mastersta_mask), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "NFI master is still busy after reset\n");
		return ret;
	}

	ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
				 !(val & (NFI_FSM | NFI_NAND_FSM)), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Failed to reset NFI\n");
		return ret;
	}

	fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
		    ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
	ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
				 !(val & fifo_mask), 0, SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "NFI FIFOs are not empty\n");
		return ret;
	}

	return 0;
}

static int mtk_snand_mac_reset(struct mtk_snand *snf)
{
	int ret;
	u32 val;

	nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);

	ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
				 !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL);
	if (ret)
		dev_err(snf->dev, "Failed to reset SNFI MAC\n");

	nfi_write32(snf, SNF_MISC_CTL,
		    (2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S));

	return ret;
}

static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen)
{
	int ret;
	u32 val;

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
	nfi_write32(snf, SNF_MAC_OUTL, outlen);
	nfi_write32(snf, SNF_MAC_INL, inlen);

	nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);

	ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
				 val & WIP_READY, 0, SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for WIP_READY\n");
		goto cleanup;
	}

	ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
				 0, SNFI_POLL_INTERVAL);
	if (ret)
		dev_err(snf->dev, "Timed out waiting for WIP cleared\n");

cleanup:
	nfi_write32(snf, SNF_MAC_CTL, 0);

	return ret;
}

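// Execute a generic spi-mem op through the SNFI MAC: the opcode, address,
// dummy and TX data bytes are packed into the GPRAM, the transfer is
// triggered, and any RX data is read back from the GPRAM afterwards.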
static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op)
{
	u32 rx_len = 0;
	u32 reg_offs = 0;
	u32 val = 0;
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	int i, ret;
	u8 b;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		rx_len = op->data.nbytes;
		rx_buf = op->data.buf.in;
	} else {
		tx_buf = op->data.buf.out;
	}

	mtk_snand_mac_reset(snf);

	for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) {
		b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff;
		val |= b << (8 * (reg_offs % 4));
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	for (i = 0; i < op->addr.nbytes; i++, reg_offs++) {
		b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff;
		val |= b << (8 * (reg_offs % 4));
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) {
		if (reg_offs % 4 == 3) {
			nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
			val = 0;
		}
	}

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		for (i = 0; i < op->data.nbytes; i++, reg_offs++) {
			val |= tx_buf[i] << (8 * (reg_offs % 4));
			if (reg_offs % 4 == 3) {
				nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
				val = 0;
			}
		}
	}

	if (reg_offs % 4)
		nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val);

	for (i = 0; i < reg_offs; i += 4)
		dev_dbg(snf->dev, "%d: %08X", i,
			nfi_read32(snf, SNF_GPRAM + i));

	dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len);

	ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len);
	if (ret)
		return ret;

	if (!rx_len)
		return 0;

	nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len);
	return 0;
}

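// Program NFI_PAGEFMT for the given page/OOB geometry: pick the page size
// index and the closest supported per-sector spare size, cache the resulting
// layout in snf->nfi_cfg and make sure the DMA bounce buffer can hold
// page_size + oob_size bytes.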
static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size,
				   u32 oob_size)
{
	int spare_idx = -1;
	u32 spare_size, spare_size_shift, pagesize_idx;
	u32 sector_size_512;
	u8 nsectors;
	int i;

	// skip if it's already configured as required.
	if (snf->nfi_cfg.page_size == page_size &&
	    snf->nfi_cfg.oob_size == oob_size)
		return 0;

	nsectors = page_size / snf->caps->sector_size;
	if (nsectors > snf->caps->max_sectors) {
		dev_err(snf->dev, "too many sectors required.\n");
		goto err;
	}

	if (snf->caps->sector_size == 512) {
		sector_size_512 = NFI_SEC_SEL_512;
		spare_size_shift = NFI_SPARE_SIZE_S;
	} else {
		sector_size_512 = 0;
		spare_size_shift = NFI_SPARE_SIZE_LS_S;
	}

	switch (page_size) {
	case SZ_512:
		pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_2K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		else
			pagesize_idx = NFI_PAGE_SIZE_512_2K;
		break;
	case SZ_4K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		else
			pagesize_idx = NFI_PAGE_SIZE_2K_4K;
		break;
	case SZ_8K:
		if (snf->caps->sector_size == 512)
			pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		else
			pagesize_idx = NFI_PAGE_SIZE_4K_8K;
		break;
	case SZ_16K:
		pagesize_idx = NFI_PAGE_SIZE_8K_16K;
		break;
	default:
		dev_err(snf->dev, "unsupported page size.\n");
		goto err;
	}

	spare_size = oob_size / nsectors;
	// If we're using the 1KB sector size, HW will automatically double the
	// spare size. We should only use half of the value in this case.
	if (snf->caps->sector_size == 1024)
		spare_size /= 2;

	for (i = snf->caps->num_spare_size - 1; i >= 0; i--) {
		if (snf->caps->spare_sizes[i] <= spare_size) {
			spare_size = snf->caps->spare_sizes[i];
			if (snf->caps->sector_size == 1024)
				spare_size *= 2;
			spare_idx = i;
			break;
		}
	}

	if (spare_idx < 0) {
		dev_err(snf->dev, "unsupported spare size: %u\n", spare_size);
		goto err;
	}

	nfi_write32(snf, NFI_PAGEFMT,
		    (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
		    (snf->caps->fdm_size << NFI_FDM_NUM_S) |
		    (spare_idx << spare_size_shift) |
		    (pagesize_idx << NFI_PAGE_SIZE_S) |
		    sector_size_512);

	snf->nfi_cfg.page_size = page_size;
	snf->nfi_cfg.oob_size = oob_size;
	snf->nfi_cfg.nsectors = nsectors;
	snf->nfi_cfg.spare_size = spare_size;

	dev_dbg(snf->dev, "page format: (%u + %u) * %u\n",
		snf->caps->sector_size, spare_size, nsectors);
	return snand_prepare_bouncebuf(snf, page_size + oob_size);
err:
	dev_err(snf->dev, "page size %u + %u is not supported\n", page_size,
		oob_size);
	return -EOPNOTSUPP;
}

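// OOB layout exposed to MTD: only the FDM bytes are reachable, and byte 0 of
// each sector's FDM is reserved (it carries the bad block mark for the first
// sector), so every free region starts at offset 1 within its FDM. The ECC
// parity area can't be read through this driver, hence the -ERANGE below.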
static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobecc)
{
	// ECC area is not accessible
	return -ERANGE;
}

static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobfree)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct mtk_snand *ms = nand_to_mtk_snand(nand);

	if (section >= ms->nfi_cfg.nsectors)
		return -ERANGE;

	oobfree->length = ms->caps->fdm_size - 1;
	oobfree->offset = section * ms->caps->fdm_size + 1;
	return 0;
}

static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
	.ecc = mtk_snand_ooblayout_ecc,
	.free = mtk_snand_ooblayout_free,
};

static int mtk_snand_ecc_init_ctx(struct nand_device *nand)
{
	struct mtk_snand *snf = nand_to_mtk_snand(nand);
	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
	struct nand_ecc_props *reqs = &nand->ecc.requirements;
	struct nand_ecc_props *user = &nand->ecc.user_conf;
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	int step_size = 0, strength = 0, desired_correction = 0, steps;
	bool ecc_user = false;
	int ret;
	u32 parity_bits, max_ecc_bytes;
	struct mtk_ecc_config *ecc_cfg;

	ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
				      nand->memorg.oobsize);
	if (ret)
		return ret;

	ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
	if (!ecc_cfg)
		return -ENOMEM;

	nand->ecc.ctx.priv = ecc_cfg;

	if (user->step_size && user->strength) {
		step_size = user->step_size;
		strength = user->strength;
		ecc_user = true;
	} else if (reqs->step_size && reqs->strength) {
		step_size = reqs->step_size;
		strength = reqs->strength;
	}

	if (step_size && strength) {
		steps = mtd->writesize / step_size;
		desired_correction = steps * strength;
		strength = desired_correction / snf->nfi_cfg.nsectors;
	}

	ecc_cfg->mode = ECC_NFI_MODE;
	ecc_cfg->sectors = snf->nfi_cfg.nsectors;
	ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size;

	// calculate the max possible strength under current page format
	parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
	max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
	ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits;
	mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);

	// if there's a user requested strength, find the minimum strength that
	// meets the requirement. Otherwise use the maximum strength which is
	// expected by BootROM.
	if (ecc_user && strength) {
		u32 s_next = ecc_cfg->strength - 1;

		while (1) {
			mtk_ecc_adjust_strength(snf->ecc, &s_next);
			if (s_next >= ecc_cfg->strength)
				break;
			if (s_next < strength)
				break;
			ecc_cfg->strength = s_next;
			s_next = ecc_cfg->strength - 1;
		}
	}

	mtd_set_ooblayout(mtd, &mtk_snand_ooblayout);

	conf->step_size = snf->caps->sector_size;
	conf->strength = ecc_cfg->strength;

	if (ecc_cfg->strength < strength)
		dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n",
			 strength);
	dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
		 ecc_cfg->strength, snf->caps->sector_size);

	return 0;
}

static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand)
{
	struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);

	kfree(ecc_cfg);
}

static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand,
					struct nand_page_io_req *req)
{
	struct mtk_snand *snf = nand_to_mtk_snand(nand);
	struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand);
	int ret;

	ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
				      nand->memorg.oobsize);
	if (ret)
		return ret;
	snf->autofmt = true;
	snf->ecc_cfg = ecc_cfg;
	return 0;
}

static int mtk_snand_ecc_finish_io_req(struct nand_device *nand,
				       struct nand_page_io_req *req)
{
	struct mtk_snand *snf = nand_to_mtk_snand(nand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	snf->ecc_cfg = NULL;
	snf->autofmt = false;
	if ((req->mode == MTD_OPS_RAW) || (req->type != NAND_PAGE_READ))
		return 0;

	if (snf->ecc_stats.failed)
		mtd->ecc_stats.failed += snf->ecc_stats.failed;
	mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
	return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
}

static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = {
	.init_ctx = mtk_snand_ecc_init_ctx,
	.cleanup_ctx = mtk_snand_ecc_cleanup_ctx,
	.prepare_io_req = mtk_snand_ecc_prepare_io_req,
	.finish_io_req = mtk_snand_ecc_finish_io_req,
};

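// The FDM (OOB) bytes of each sector are not part of the DMA data; they are
// exchanged through the per-sector NFI_FDML/NFI_FDMM register pairs. The two
// helpers below copy them between those registers and the linear OOB area at
// the end of the bounce buffer.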
static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
{
	u32 vall, valm;
	u8 *oobptr = buf;
	int i, j;

	for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
		vall = nfi_read32(snf, NFI_FDML(i));
		valm = nfi_read32(snf, NFI_FDMM(i));

		for (j = 0; j < snf->caps->fdm_size; j++)
			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);

		oobptr += snf->caps->fdm_size;
	}
}

static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
{
	u32 fdm_size = snf->caps->fdm_size;
	const u8 *oobptr = buf;
	u32 vall, valm;
	int i, j;

	for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
		vall = 0;
		valm = 0;

		for (j = 0; j < 8; j++) {
			if (j < 4)
				vall |= (j < fdm_size ? oobptr[j] : 0xff)
					<< (j * 8);
			else
				valm |= (j < fdm_size ? oobptr[j] : 0xff)
					<< ((j - 4) * 8);
		}

		nfi_write32(snf, NFI_FDML(i), vall);
		nfi_write32(snf, NFI_FDMM(i), valm);

		oobptr += fdm_size;
	}
}

static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
{
	u32 buf_bbm_pos, fdm_bbm_pos;

	if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
		return;

	// swap [pagesize] byte on nand with the first fdm byte
	// in the last sector.
	buf_bbm_pos = snf->nfi_cfg.page_size -
		      (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
	fdm_bbm_pos = snf->nfi_cfg.page_size +
		      (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;

	swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]);
}

static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
{
	u32 fdm_bbm_pos1, fdm_bbm_pos2;

	if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
		return;

	// swap the first fdm byte in the first and the last sector.
	fdm_bbm_pos1 = snf->nfi_cfg.page_size;
	fdm_bbm_pos2 = snf->nfi_cfg.page_size +
		       (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
	swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
}

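// Read From Cache over DMA: program the SNFI read command/address/mode, let
// the NFI de-interleave the sectors into the bounce buffer (or straight into
// the caller's buffer when a whole page is requested), collect the FDM bytes,
// apply the BBM swaps and finally copy out the part of the page the spi-mem
// op actually asked for.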
static int mtk_snand_read_page_cache(struct mtk_snand *snf,
				     const struct spi_mem_op *op)
{
	u8 *buf = snf->buf;
	u8 *buf_fdm = buf + snf->nfi_cfg.page_size;
	// the address part to be sent by the controller
	u32 op_addr = op->addr.val;
	// where to start copying data from bounce buffer
	u32 rd_offset = 0;
	u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth);
	u32 op_mode = 0;
	u32 dma_len = snf->buf_len;
	int ret = 0;
	u32 rd_mode, rd_bytes, val;
	dma_addr_t buf_dma;

	if (snf->autofmt) {
		u32 last_bit;
		u32 mask;

		dma_len = snf->nfi_cfg.page_size;
		op_mode = CNFG_AUTO_FMT_EN;
		if (op->data.ecc)
			op_mode |= CNFG_HW_ECC_EN;
		// extract the plane bit:
		// Find the highest bit set in (pagesize+oobsize).
		// Bits higher than that in op->addr are kept and sent over SPI
		// Lower bits are used as an offset for copying data from DMA
		// bounce buffer.
		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
		mask = (1 << last_bit) - 1;
		rd_offset = op_addr & mask;
		op_addr &= ~mask;

		// check if we can dma to the caller memory
		if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
			buf = op->data.buf.in;
	}
	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	// command and dummy cycles
	nfi_write32(snf, SNF_RD_CTL2,
		    (dummy_clk << DATA_READ_DUMMY_S) |
		    (op->cmd.opcode << DATA_READ_CMD_S));

	// read address
	nfi_write32(snf, SNF_RD_CTL3, op_addr);

	// Set read op_mode
	if (op->data.buswidth == 4)
		rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD :
						   DATA_READ_MODE_X4;
	else if (op->data.buswidth == 2)
		rd_mode = op->addr.buswidth == 2 ? DATA_READ_MODE_DUAL :
						   DATA_READ_MODE_X2;
	else
		rd_mode = DATA_READ_MODE_X1;
	rd_mode <<= DATA_READ_MODE_S;
	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
		  rd_mode | DATARD_CUSTOM_EN);

	// Set bytes to read
	rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
		   snf->nfi_cfg.nsectors;
	nfi_write32(snf, SNF_MISC_CTL2,
		    (rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes);

	// NFI read prepare
	nfi_write16(snf, NFI_CNFG,
		    (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN |
		    CNFG_READ_MODE | CNFG_DMA_MODE | op_mode);

	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));

	buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
	ret = dma_mapping_error(snf->dev, buf_dma);
	if (ret) {
		dev_err(snf->dev, "DMA mapping failed.\n");
		goto cleanup;
	}
	nfi_write32(snf, NFI_STRADDR, buf_dma);
	if (op->data.ecc) {
		snf->ecc_cfg->op = ECC_DECODE;
		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
		if (ret)
			goto cleanup_dma;
	}
	// Prepare for custom read interrupt
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
	reinit_completion(&snf->op_done);

	// Trigger NFI into custom mode
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);

	// Start DMA read
	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	if (!wait_for_completion_timeout(
		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
		dev_err(snf->dev, "DMA timed out for reading from cache.\n");
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	// Wait for BUS_SEC_CNTR returning expected value
	ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
				 BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
		goto cleanup2;
	}

	// Wait for bus becoming idle
	ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
				 !(val & snf->caps->mastersta_mask), 0,
				 SNFI_POLL_INTERVAL);
	if (ret) {
		dev_err(snf->dev, "Timed out waiting for bus becoming idle\n");
		goto cleanup2;
	}

	if (op->data.ecc) {
		ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
		if (ret) {
			dev_err(snf->dev, "wait ecc done timeout\n");
			goto cleanup2;
		}
		// save status before disabling ecc
		mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
				  snf->nfi_cfg.nsectors);
	}

	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);

	if (snf->autofmt) {
		mtk_snand_read_fdm(snf, buf_fdm);
		if (snf->caps->bbm_swap) {
			mtk_snand_bm_swap(snf, buf);
			mtk_snand_fdm_bm_swap(snf);
		}
	}

	// copy data back
	if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
		memset(op->data.buf.in, 0xff, op->data.nbytes);
		snf->ecc_stats.bitflips = 0;
		snf->ecc_stats.failed = 0;
		snf->ecc_stats.corrected = 0;
	} else {
		if (buf == op->data.buf.in) {
			u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
			u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;

			if (req_left)
				memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
				       buf_fdm,
				       cap_len < req_left ? cap_len : req_left);
		} else if (rd_offset < snf->buf_len) {
			u32 cap_len = snf->buf_len - rd_offset;

			if (op->data.nbytes < cap_len)
				cap_len = op->data.nbytes;
			memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
		}
	}
cleanup2:
	if (op->data.ecc)
		mtk_ecc_disable(snf->ecc);
cleanup_dma:
	// unmap dma only if any error happens. (otherwise it's done before
	// data copying)
	if (ret)
		dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
cleanup:
	// Stop read
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	// Clear SNF done flag
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	// Disable interrupt
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
	return ret;
}

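// Program Load over DMA: the mirror image of the read path. The requested
// data is staged in the bounce buffer (with 0xff padding in front when the op
// starts at a non-zero column), the BBM swaps are applied in the opposite
// direction, the FDM registers are loaded, and the controller streams the
// interleaved sectors into the chip's cache.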
static int mtk_snand_write_page_cache(struct mtk_snand *snf,
				      const struct spi_mem_op *op)
{
	// the address part to be sent by the controller
	u32 op_addr = op->addr.val;
	// where to start copying data from bounce buffer
	u32 wr_offset = 0;
	u32 op_mode = 0;
	int ret = 0;
	u32 wr_mode = 0;
	u32 dma_len = snf->buf_len;
	u32 wr_bytes, val;
	size_t cap_len;
	dma_addr_t buf_dma;

	if (snf->autofmt) {
		u32 last_bit;
		u32 mask;

		dma_len = snf->nfi_cfg.page_size;
		op_mode = CNFG_AUTO_FMT_EN;
		if (op->data.ecc)
			op_mode |= CNFG_HW_ECC_EN;

		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
		mask = (1 << last_bit) - 1;
		wr_offset = op_addr & mask;
		op_addr &= ~mask;
	}
	mtk_snand_mac_reset(snf);
	mtk_nfi_reset(snf);

	if (wr_offset)
		memset(snf->buf, 0xff, wr_offset);

	cap_len = snf->buf_len - wr_offset;
	if (op->data.nbytes < cap_len)
		cap_len = op->data.nbytes;
	memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
	if (snf->autofmt) {
		if (snf->caps->bbm_swap) {
			mtk_snand_fdm_bm_swap(snf);
			mtk_snand_bm_swap(snf, snf->buf);
		}
		mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
	}

	// Command
	nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));

	// write address
	nfi_write32(snf, SNF_PG_CTL2, op_addr);

	// Set write op_mode
	if (op->data.buswidth == 4)
		wr_mode = PG_LOAD_X4_EN;

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
		  wr_mode | PG_LOAD_CUSTOM_EN);

	// Set bytes to write
	wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
		   snf->nfi_cfg.nsectors;
	nfi_write32(snf, SNF_MISC_CTL2,
		    (wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes);

	// NFI write prepare
	nfi_write16(snf, NFI_CNFG,
		    (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
		    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode);

	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
	buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(snf->dev, buf_dma);
	if (ret) {
		dev_err(snf->dev, "DMA mapping failed.\n");
		goto cleanup;
	}
	nfi_write32(snf, NFI_STRADDR, buf_dma);
	if (op->data.ecc) {
		snf->ecc_cfg->op = ECC_ENCODE;
		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
		if (ret)
			goto cleanup_dma;
	}
	// Prepare for custom write interrupt
	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
	reinit_completion(&snf->op_done);

	// Trigger NFI into custom mode
	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);

	// Start DMA write
	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
	nfi_write16(snf, NFI_STRDATA, STR_DATA);

	if (!wait_for_completion_timeout(
		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
		dev_err(snf->dev, "DMA timed out for program load.\n");
		ret = -ETIMEDOUT;
		goto cleanup_ecc;
	}

	// Wait for NFI_SEC_CNTR returning expected value
	ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
				 NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
				 SNFI_POLL_INTERVAL);
	if (ret)
		dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");

cleanup_ecc:
	if (op->data.ecc)
		mtk_ecc_disable(snf->ecc);
cleanup_dma:
	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
cleanup:
	// Stop write
	nfi_write32(snf, NFI_CON, 0);
	nfi_write16(snf, NFI_CNFG, 0);

	// Clear SNF done flag
	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
	nfi_write32(snf, SNF_STA_CTL1, 0);

	// Disable interrupt
	nfi_read32(snf, NFI_INTR_STA);
	nfi_write32(snf, NFI_INTR_EN, 0);

	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);

	return ret;
}

/**
 * mtk_snand_is_page_ops() - check if the op is a controller supported page op.
 * @op: spi-mem op to check
 *
 * Check whether op can be executed with read_from_cache or program_load
 * mode in the controller.
 * This controller can execute typical Read From Cache and Program Load
 * instructions found on SPI-NAND with 2-byte address.
 * DTR and cmd buswidth & nbytes should be checked before calling this.
 *
 * Return: true if the op matches the instruction template
 */
static bool mtk_snand_is_page_ops(const struct spi_mem_op *op)
{
	if (op->addr.nbytes != 2)
		return false;

	if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
	    op->addr.buswidth != 4)
		return false;

	// match read from page instructions
	if (op->data.dir == SPI_MEM_DATA_IN) {
		// check dummy cycle first
		if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth >
		    DATA_READ_MAX_DUMMY)
			return false;
		// quad io / quad out
		if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) &&
		    op->data.buswidth == 4)
			return true;

		// dual io / dual out
		if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) &&
		    op->data.buswidth == 2)
			return true;

		// standard spi
		if (op->addr.buswidth == 1 && op->data.buswidth == 1)
			return true;
	} else if (op->data.dir == SPI_MEM_DATA_OUT) {
		// check dummy cycle first
		if (op->dummy.nbytes)
			return false;
		// program load quad out
		if (op->addr.buswidth == 1 && op->data.buswidth == 4)
			return true;
		// standard spi
		if (op->addr.buswidth == 1 && op->data.buswidth == 1)
			return true;
	}
	return false;
}

static bool mtk_snand_supports_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	if (!spi_mem_default_supports_op(mem, op))
		return false;
	if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
		return false;
	if (mtk_snand_is_page_ops(op))
		return true;
	return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) &&
		(op->dummy.nbytes == 0 || op->dummy.buswidth == 1) &&
		(op->data.nbytes == 0 || op->data.buswidth == 1));
}

static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);

	// page ops transfer size must be exactly ((sector_size + spare_size) *
	// nsectors). Limit the op size if the caller requests more than that.
	// exec_op will read more than needed and discard the leftover if the
	// caller requests less data.
	if (mtk_snand_is_page_ops(op)) {
		size_t l;
		// skip adjust_op_size for page ops
		if (ms->autofmt)
			return 0;
		l = ms->caps->sector_size + ms->nfi_cfg.spare_size;
		l *= ms->nfi_cfg.nsectors;
		if (op->data.nbytes > l)
			op->data.nbytes = l;
	} else {
		size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (hl >= SNF_GPRAM_SIZE)
			return -EOPNOTSUPP;
		if (op->data.nbytes > SNF_GPRAM_SIZE - hl)
			op->data.nbytes = SNF_GPRAM_SIZE - hl;
	}
	return 0;
}

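// Dispatch: ops matching the page_op template go through the DMA-based
// read/program paths above; everything else is executed through the SNFI MAC
// using the GPRAM buffer (see mtk_snand_mac_io()).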
static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master);

	dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode,
		op->addr.val, op->addr.buswidth, op->addr.nbytes,
		op->data.buswidth, op->data.nbytes);
	if (mtk_snand_is_page_ops(op)) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			return mtk_snand_read_page_cache(ms, op);
		else
			return mtk_snand_write_page_cache(ms, op);
	} else {
		return mtk_snand_mac_io(ms, op);
	}
}

static const struct spi_controller_mem_ops mtk_snand_mem_ops = {
	.adjust_op_size = mtk_snand_adjust_op_size,
	.supports_op = mtk_snand_supports_op,
	.exec_op = mtk_snand_exec_op,
};

static const struct spi_controller_mem_caps mtk_snand_mem_caps = {
	.ecc = true,
};

static irqreturn_t mtk_snand_irq(int irq, void *id)
{
	struct mtk_snand *snf = id;
	u32 sta, ien;

	sta = nfi_read32(snf, NFI_INTR_STA);
	ien = nfi_read32(snf, NFI_INTR_EN);

	if (!(sta & ien))
		return IRQ_NONE;

	nfi_write32(snf, NFI_INTR_EN, 0);
	complete(&snf->op_done);
	return IRQ_HANDLED;
}

static const struct of_device_id mtk_snand_ids[] = {
	{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps },
	{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps },
	{},
};

MODULE_DEVICE_TABLE(of, mtk_snand_ids);

static int mtk_snand_enable_clk(struct mtk_snand *ms)
{
	int ret;

	ret = clk_prepare_enable(ms->nfi_clk);
	if (ret) {
		dev_err(ms->dev, "unable to enable nfi clk\n");
		return ret;
	}
	ret = clk_prepare_enable(ms->pad_clk);
	if (ret) {
		dev_err(ms->dev, "unable to enable pad clk\n");
		goto err1;
	}
	return 0;
err1:
	clk_disable_unprepare(ms->nfi_clk);
	return ret;
}

static void mtk_snand_disable_clk(struct mtk_snand *ms)
{
	clk_disable_unprepare(ms->pad_clk);
	clk_disable_unprepare(ms->nfi_clk);
}

static int mtk_snand_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *dev_id;
	struct spi_controller *ctlr;
	struct mtk_snand *ms;
	int ret;

	dev_id = of_match_node(mtk_snand_ids, np);
	if (!dev_id)
		return -EINVAL;

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*ms));
	if (!ctlr)
		return -ENOMEM;
	platform_set_drvdata(pdev, ctlr);

	ms = spi_controller_get_devdata(ctlr);

	ms->ctlr = ctlr;
	ms->caps = dev_id->data;

	ms->ecc = of_mtk_ecc_get(np);
	if (IS_ERR(ms->ecc))
		return PTR_ERR(ms->ecc);
	else if (!ms->ecc)
		return -ENODEV;

	ms->nfi_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ms->nfi_base)) {
		ret = PTR_ERR(ms->nfi_base);
		goto release_ecc;
	}

	ms->dev = &pdev->dev;

	ms->nfi_clk = devm_clk_get(&pdev->dev, "nfi_clk");
	if (IS_ERR(ms->nfi_clk)) {
		ret = PTR_ERR(ms->nfi_clk);
		dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret);
		goto release_ecc;
	}

	ms->pad_clk = devm_clk_get(&pdev->dev, "pad_clk");
	if (IS_ERR(ms->pad_clk)) {
		ret = PTR_ERR(ms->pad_clk);
		dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret);
		goto release_ecc;
	}

	ret = mtk_snand_enable_clk(ms);
	if (ret)
		goto release_ecc;

	init_completion(&ms->op_done);

	ms->irq = platform_get_irq(pdev, 0);
	if (ms->irq < 0) {
		ret = ms->irq;
		goto disable_clk;
	}
	ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0,
			       "mtk-snand", ms);
	if (ret) {
		dev_err(ms->dev, "failed to request snfi irq\n");
		goto disable_clk;
	}

	ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(ms->dev, "failed to set dma mask\n");
		goto disable_clk;
	}

	// switch to SNFI mode
	nfi_write32(ms, SNF_CFG, SPI_MODE);

	// setup an initial page format for ops matching page_cache_op template
	// before ECC is called.
	ret = mtk_snand_setup_pagefmt(ms, ms->caps->sector_size,
				      ms->caps->spare_sizes[0]);
	if (ret) {
		dev_err(ms->dev, "failed to set initial page format\n");
		goto disable_clk;
	}

	// setup ECC engine
	ms->ecc_eng.dev = &pdev->dev;
	ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
	ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops;
	ms->ecc_eng.priv = ms;

	ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng);
	if (ret) {
		dev_err(&pdev->dev, "failed to register ecc engine.\n");
		goto disable_clk;
	}

	ctlr->num_chipselect = 1;
	ctlr->mem_ops = &mtk_snand_mem_ops;
	ctlr->mem_caps = &mtk_snand_mem_caps;
	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
	ctlr->dev.of_node = pdev->dev.of_node;
	ret = spi_register_controller(ctlr);
	if (ret) {
		dev_err(&pdev->dev, "spi_register_controller failed.\n");
		goto disable_clk;
	}

	return 0;
disable_clk:
	mtk_snand_disable_clk(ms);
release_ecc:
	mtk_ecc_release(ms->ecc);
	return ret;
}

static int mtk_snand_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);
	struct mtk_snand *ms = spi_controller_get_devdata(ctlr);

	spi_unregister_controller(ctlr);
	mtk_snand_disable_clk(ms);
	mtk_ecc_release(ms->ecc);
	kfree(ms->buf);
	return 0;
}

static struct platform_driver mtk_snand_driver = {
	.probe = mtk_snand_probe,
	.remove = mtk_snand_remove,
	.driver = {
		.name = "mtk-snand",
		.of_match_table = mtk_snand_ids,
	},
};

module_platform_driver(mtk_snand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");