spi-mtk-nor.c (25496B)
1// SPDX-License-Identifier: GPL-2.0 2// 3// Mediatek SPI NOR controller driver 4// 5// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com> 6 7#include <linux/bits.h> 8#include <linux/clk.h> 9#include <linux/completion.h> 10#include <linux/dma-mapping.h> 11#include <linux/interrupt.h> 12#include <linux/io.h> 13#include <linux/iopoll.h> 14#include <linux/kernel.h> 15#include <linux/module.h> 16#include <linux/of_device.h> 17#include <linux/pm_runtime.h> 18#include <linux/spi/spi.h> 19#include <linux/spi/spi-mem.h> 20#include <linux/string.h> 21 22#define DRIVER_NAME "mtk-spi-nor" 23 24#define MTK_NOR_REG_CMD 0x00 25#define MTK_NOR_CMD_WRITE BIT(4) 26#define MTK_NOR_CMD_PROGRAM BIT(2) 27#define MTK_NOR_CMD_READ BIT(0) 28#define MTK_NOR_CMD_MASK GENMASK(5, 0) 29 30#define MTK_NOR_REG_PRG_CNT 0x04 31#define MTK_NOR_PRG_CNT_MAX 56 32#define MTK_NOR_REG_RDATA 0x0c 33 34#define MTK_NOR_REG_RADR0 0x10 35#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n)) 36#define MTK_NOR_REG_RADR3 0xc8 37 38#define MTK_NOR_REG_WDATA 0x1c 39 40#define MTK_NOR_REG_PRGDATA0 0x20 41#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n)) 42#define MTK_NOR_REG_PRGDATA_MAX 5 43 44#define MTK_NOR_REG_SHIFT0 0x38 45#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n)) 46#define MTK_NOR_REG_SHIFT_MAX 9 47 48#define MTK_NOR_REG_CFG1 0x60 49#define MTK_NOR_FAST_READ BIT(0) 50 51#define MTK_NOR_REG_CFG2 0x64 52#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4) 53#define MTK_NOR_WR_BUF_EN BIT(0) 54 55#define MTK_NOR_REG_PP_DATA 0x98 56 57#define MTK_NOR_REG_IRQ_STAT 0xa8 58#define MTK_NOR_REG_IRQ_EN 0xac 59#define MTK_NOR_IRQ_DMA BIT(7) 60#define MTK_NOR_IRQ_MASK GENMASK(7, 0) 61 62#define MTK_NOR_REG_CFG3 0xb4 63#define MTK_NOR_DISABLE_WREN BIT(7) 64#define MTK_NOR_DISABLE_SR_POLL BIT(5) 65 66#define MTK_NOR_REG_WP 0xc4 67#define MTK_NOR_ENABLE_SF_CMD 0x30 68 69#define MTK_NOR_REG_BUSCFG 0xcc 70#define MTK_NOR_4B_ADDR BIT(4) 71#define MTK_NOR_QUAD_ADDR BIT(3) 72#define MTK_NOR_QUAD_READ BIT(2) 73#define MTK_NOR_DUAL_ADDR BIT(1) 74#define MTK_NOR_DUAL_READ BIT(0) 75#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0) 76 77#define MTK_NOR_REG_DMA_CTL 0x718 78#define MTK_NOR_DMA_START BIT(0) 79 80#define MTK_NOR_REG_DMA_FADR 0x71c 81#define MTK_NOR_REG_DMA_DADR 0x720 82#define MTK_NOR_REG_DMA_END_DADR 0x724 83#define MTK_NOR_REG_DMA_DADR_HB 0x738 84#define MTK_NOR_REG_DMA_END_DADR_HB 0x73c 85 86#define MTK_NOR_PRG_MAX_SIZE 6 87// Reading DMA src/dst addresses have to be 16-byte aligned 88#define MTK_NOR_DMA_ALIGN 16 89#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1) 90// and we allocate a bounce buffer if destination address isn't aligned. 91#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE 92 93// Buffered page program can do one 128-byte transfer 94#define MTK_NOR_PP_SIZE 128 95 96#define CLK_TO_US(sp, clkcnt) DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000) 97 98struct mtk_nor_caps { 99 u8 dma_bits; 100 101 /* extra_dummy_bit is adding for the IP of new SoCs. 102 * Some new SoCs modify the timing of fetching registers' values 103 * and IDs of nor flash, they need a extra_dummy_bit which can add 104 * more clock cycles for fetching data. 105 */ 106 u8 extra_dummy_bit; 107}; 108 109struct mtk_nor { 110 struct spi_controller *ctlr; 111 struct device *dev; 112 void __iomem *base; 113 u8 *buffer; 114 dma_addr_t buffer_dma; 115 struct clk *spi_clk; 116 struct clk *ctlr_clk; 117 struct clk *axi_clk; 118 struct clk *axi_s_clk; 119 unsigned int spi_freq; 120 bool wbuf_en; 121 bool has_irq; 122 bool high_dma; 123 struct completion op_done; 124 const struct mtk_nor_caps *caps; 125}; 126 127static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr) 128{ 129 u32 val = readl(sp->base + reg); 130 131 val &= ~clr; 132 val |= set; 133 writel(val, sp->base + reg); 134} 135 136static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk) 137{ 138 ulong delay = CLK_TO_US(sp, clk); 139 u32 reg; 140 int ret; 141 142 writel(cmd, sp->base + MTK_NOR_REG_CMD); 143 ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd), 144 delay / 3, (delay + 1) * 200); 145 if (ret < 0) 146 dev_err(sp->dev, "command %u timeout.\n", cmd); 147 return ret; 148} 149 150static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op) 151{ 152 u32 addr = op->addr.val; 153 int i; 154 155 for (i = 0; i < 3; i++) { 156 writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i)); 157 addr >>= 8; 158 } 159 if (op->addr.nbytes == 4) { 160 writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3); 161 mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0); 162 } else { 163 mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR); 164 } 165} 166 167static bool need_bounce(struct mtk_nor *sp, const struct spi_mem_op *op) 168{ 169 return ((uintptr_t)op->data.buf.in & MTK_NOR_DMA_ALIGN_MASK); 170} 171 172static bool mtk_nor_match_read(const struct spi_mem_op *op) 173{ 174 int dummy = 0; 175 176 if (op->dummy.nbytes) 177 dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth; 178 179 if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) { 180 if (op->addr.buswidth == 1) 181 return dummy == 8; 182 else if (op->addr.buswidth == 2) 183 return dummy == 4; 184 else if (op->addr.buswidth == 4) 185 return dummy == 6; 186 } else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) { 187 if (op->cmd.opcode == 0x03) 188 return dummy == 0; 189 else if (op->cmd.opcode == 0x0b) 190 return dummy == 8; 191 } 192 return false; 193} 194 195static bool mtk_nor_match_prg(const struct spi_mem_op *op) 196{ 197 int tx_len, rx_len, prg_len, prg_left; 198 199 // prg mode is spi-only. 200 if ((op->cmd.buswidth > 1) || (op->addr.buswidth > 1) || 201 (op->dummy.buswidth > 1) || (op->data.buswidth > 1)) 202 return false; 203 204 tx_len = op->cmd.nbytes + op->addr.nbytes; 205 206 if (op->data.dir == SPI_MEM_DATA_OUT) { 207 // count dummy bytes only if we need to write data after it 208 tx_len += op->dummy.nbytes; 209 210 // leave at least one byte for data 211 if (tx_len > MTK_NOR_REG_PRGDATA_MAX) 212 return false; 213 214 // if there's no addr, meaning adjust_op_size is impossible, 215 // check data length as well. 216 if ((!op->addr.nbytes) && 217 (tx_len + op->data.nbytes > MTK_NOR_REG_PRGDATA_MAX + 1)) 218 return false; 219 } else if (op->data.dir == SPI_MEM_DATA_IN) { 220 if (tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) 221 return false; 222 223 rx_len = op->data.nbytes; 224 prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes; 225 if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1) 226 prg_left = MTK_NOR_REG_SHIFT_MAX + 1; 227 if (rx_len > prg_left) { 228 if (!op->addr.nbytes) 229 return false; 230 rx_len = prg_left; 231 } 232 233 prg_len = tx_len + op->dummy.nbytes + rx_len; 234 if (prg_len > MTK_NOR_PRG_CNT_MAX / 8) 235 return false; 236 } else { 237 prg_len = tx_len + op->dummy.nbytes; 238 if (prg_len > MTK_NOR_PRG_CNT_MAX / 8) 239 return false; 240 } 241 return true; 242} 243 244static void mtk_nor_adj_prg_size(struct spi_mem_op *op) 245{ 246 int tx_len, tx_left, prg_left; 247 248 tx_len = op->cmd.nbytes + op->addr.nbytes; 249 if (op->data.dir == SPI_MEM_DATA_OUT) { 250 tx_len += op->dummy.nbytes; 251 tx_left = MTK_NOR_REG_PRGDATA_MAX + 1 - tx_len; 252 if (op->data.nbytes > tx_left) 253 op->data.nbytes = tx_left; 254 } else if (op->data.dir == SPI_MEM_DATA_IN) { 255 prg_left = MTK_NOR_PRG_CNT_MAX / 8 - tx_len - op->dummy.nbytes; 256 if (prg_left > MTK_NOR_REG_SHIFT_MAX + 1) 257 prg_left = MTK_NOR_REG_SHIFT_MAX + 1; 258 if (op->data.nbytes > prg_left) 259 op->data.nbytes = prg_left; 260 } 261} 262 263static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) 264{ 265 struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master); 266 267 if (!op->data.nbytes) 268 return 0; 269 270 if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) { 271 if ((op->data.dir == SPI_MEM_DATA_IN) && 272 mtk_nor_match_read(op)) { 273 // limit size to prevent timeout calculation overflow 274 if (op->data.nbytes > 0x400000) 275 op->data.nbytes = 0x400000; 276 277 if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) || 278 (op->data.nbytes < MTK_NOR_DMA_ALIGN)) 279 op->data.nbytes = 1; 280 else if (!need_bounce(sp, op)) 281 op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK; 282 else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE) 283 op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE; 284 return 0; 285 } else if (op->data.dir == SPI_MEM_DATA_OUT) { 286 if (op->data.nbytes >= MTK_NOR_PP_SIZE) 287 op->data.nbytes = MTK_NOR_PP_SIZE; 288 else 289 op->data.nbytes = 1; 290 return 0; 291 } 292 } 293 294 mtk_nor_adj_prg_size(op); 295 return 0; 296} 297 298static bool mtk_nor_supports_op(struct spi_mem *mem, 299 const struct spi_mem_op *op) 300{ 301 if (!spi_mem_default_supports_op(mem, op)) 302 return false; 303 304 if (op->cmd.buswidth != 1) 305 return false; 306 307 if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) { 308 switch (op->data.dir) { 309 case SPI_MEM_DATA_IN: 310 if (mtk_nor_match_read(op)) 311 return true; 312 break; 313 case SPI_MEM_DATA_OUT: 314 if ((op->addr.buswidth == 1) && 315 (op->dummy.nbytes == 0) && 316 (op->data.buswidth == 1)) 317 return true; 318 break; 319 default: 320 break; 321 } 322 } 323 324 return mtk_nor_match_prg(op); 325} 326 327static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op) 328{ 329 u32 reg = 0; 330 331 if (op->addr.nbytes == 4) 332 reg |= MTK_NOR_4B_ADDR; 333 334 if (op->data.buswidth == 4) { 335 reg |= MTK_NOR_QUAD_READ; 336 writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4)); 337 if (op->addr.buswidth == 4) 338 reg |= MTK_NOR_QUAD_ADDR; 339 } else if (op->data.buswidth == 2) { 340 reg |= MTK_NOR_DUAL_READ; 341 writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3)); 342 if (op->addr.buswidth == 2) 343 reg |= MTK_NOR_DUAL_ADDR; 344 } else { 345 if (op->cmd.opcode == 0x0b) 346 mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0); 347 else 348 mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ); 349 } 350 mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK); 351} 352 353static int mtk_nor_dma_exec(struct mtk_nor *sp, u32 from, unsigned int length, 354 dma_addr_t dma_addr) 355{ 356 int ret = 0; 357 ulong delay; 358 u32 reg; 359 360 writel(from, sp->base + MTK_NOR_REG_DMA_FADR); 361 writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR); 362 writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR); 363 364 if (sp->high_dma) { 365 writel(upper_32_bits(dma_addr), 366 sp->base + MTK_NOR_REG_DMA_DADR_HB); 367 writel(upper_32_bits(dma_addr + length), 368 sp->base + MTK_NOR_REG_DMA_END_DADR_HB); 369 } 370 371 if (sp->has_irq) { 372 reinit_completion(&sp->op_done); 373 mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0); 374 } 375 376 mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0); 377 378 delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE); 379 380 if (sp->has_irq) { 381 if (!wait_for_completion_timeout(&sp->op_done, 382 (delay + 1) * 100)) 383 ret = -ETIMEDOUT; 384 } else { 385 ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg, 386 !(reg & MTK_NOR_DMA_START), delay / 3, 387 (delay + 1) * 100); 388 } 389 390 if (ret < 0) 391 dev_err(sp->dev, "dma read timeout.\n"); 392 393 return ret; 394} 395 396static int mtk_nor_read_bounce(struct mtk_nor *sp, const struct spi_mem_op *op) 397{ 398 unsigned int rdlen; 399 int ret; 400 401 if (op->data.nbytes & MTK_NOR_DMA_ALIGN_MASK) 402 rdlen = (op->data.nbytes + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK; 403 else 404 rdlen = op->data.nbytes; 405 406 ret = mtk_nor_dma_exec(sp, op->addr.val, rdlen, sp->buffer_dma); 407 408 if (!ret) 409 memcpy(op->data.buf.in, sp->buffer, op->data.nbytes); 410 411 return ret; 412} 413 414static int mtk_nor_read_dma(struct mtk_nor *sp, const struct spi_mem_op *op) 415{ 416 int ret; 417 dma_addr_t dma_addr; 418 419 if (need_bounce(sp, op)) 420 return mtk_nor_read_bounce(sp, op); 421 422 dma_addr = dma_map_single(sp->dev, op->data.buf.in, 423 op->data.nbytes, DMA_FROM_DEVICE); 424 425 if (dma_mapping_error(sp->dev, dma_addr)) 426 return -EINVAL; 427 428 ret = mtk_nor_dma_exec(sp, op->addr.val, op->data.nbytes, dma_addr); 429 430 dma_unmap_single(sp->dev, dma_addr, op->data.nbytes, DMA_FROM_DEVICE); 431 432 return ret; 433} 434 435static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op) 436{ 437 u8 *buf = op->data.buf.in; 438 int ret; 439 440 ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE); 441 if (!ret) 442 buf[0] = readb(sp->base + MTK_NOR_REG_RDATA); 443 return ret; 444} 445 446static int mtk_nor_write_buffer_enable(struct mtk_nor *sp) 447{ 448 int ret; 449 u32 val; 450 451 if (sp->wbuf_en) 452 return 0; 453 454 val = readl(sp->base + MTK_NOR_REG_CFG2); 455 writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2); 456 ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val, 457 val & MTK_NOR_WR_BUF_EN, 0, 10000); 458 if (!ret) 459 sp->wbuf_en = true; 460 return ret; 461} 462 463static int mtk_nor_write_buffer_disable(struct mtk_nor *sp) 464{ 465 int ret; 466 u32 val; 467 468 if (!sp->wbuf_en) 469 return 0; 470 val = readl(sp->base + MTK_NOR_REG_CFG2); 471 writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2); 472 ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val, 473 !(val & MTK_NOR_WR_BUF_EN), 0, 10000); 474 if (!ret) 475 sp->wbuf_en = false; 476 return ret; 477} 478 479static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op) 480{ 481 const u8 *buf = op->data.buf.out; 482 u32 val; 483 int ret, i; 484 485 ret = mtk_nor_write_buffer_enable(sp); 486 if (ret < 0) 487 return ret; 488 489 for (i = 0; i < op->data.nbytes; i += 4) { 490 val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 | 491 buf[i]; 492 writel(val, sp->base + MTK_NOR_REG_PP_DATA); 493 } 494 return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 495 (op->data.nbytes + 5) * BITS_PER_BYTE); 496} 497 498static int mtk_nor_pp_unbuffered(struct mtk_nor *sp, 499 const struct spi_mem_op *op) 500{ 501 const u8 *buf = op->data.buf.out; 502 int ret; 503 504 ret = mtk_nor_write_buffer_disable(sp); 505 if (ret < 0) 506 return ret; 507 writeb(buf[0], sp->base + MTK_NOR_REG_WDATA); 508 return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE); 509} 510 511static int mtk_nor_spi_mem_prg(struct mtk_nor *sp, const struct spi_mem_op *op) 512{ 513 int rx_len = 0; 514 int reg_offset = MTK_NOR_REG_PRGDATA_MAX; 515 int tx_len, prg_len; 516 int i, ret; 517 void __iomem *reg; 518 u8 bufbyte; 519 520 tx_len = op->cmd.nbytes + op->addr.nbytes; 521 522 // count dummy bytes only if we need to write data after it 523 if (op->data.dir == SPI_MEM_DATA_OUT) 524 tx_len += op->dummy.nbytes + op->data.nbytes; 525 else if (op->data.dir == SPI_MEM_DATA_IN) 526 rx_len = op->data.nbytes; 527 528 prg_len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes + 529 op->data.nbytes; 530 531 // an invalid op may reach here if the caller calls exec_op without 532 // adjust_op_size. return -EINVAL instead of -ENOTSUPP so that 533 // spi-mem won't try this op again with generic spi transfers. 534 if ((tx_len > MTK_NOR_REG_PRGDATA_MAX + 1) || 535 (rx_len > MTK_NOR_REG_SHIFT_MAX + 1) || 536 (prg_len > MTK_NOR_PRG_CNT_MAX / 8)) 537 return -EINVAL; 538 539 // fill tx data 540 for (i = op->cmd.nbytes; i > 0; i--, reg_offset--) { 541 reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset); 542 bufbyte = (op->cmd.opcode >> ((i - 1) * BITS_PER_BYTE)) & 0xff; 543 writeb(bufbyte, reg); 544 } 545 546 for (i = op->addr.nbytes; i > 0; i--, reg_offset--) { 547 reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset); 548 bufbyte = (op->addr.val >> ((i - 1) * BITS_PER_BYTE)) & 0xff; 549 writeb(bufbyte, reg); 550 } 551 552 if (op->data.dir == SPI_MEM_DATA_OUT) { 553 for (i = 0; i < op->dummy.nbytes; i++, reg_offset--) { 554 reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset); 555 writeb(0, reg); 556 } 557 558 for (i = 0; i < op->data.nbytes; i++, reg_offset--) { 559 reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset); 560 writeb(((const u8 *)(op->data.buf.out))[i], reg); 561 } 562 } 563 564 for (; reg_offset >= 0; reg_offset--) { 565 reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset); 566 writeb(0, reg); 567 } 568 569 // trigger op 570 if (rx_len) 571 writel(prg_len * BITS_PER_BYTE + sp->caps->extra_dummy_bit, 572 sp->base + MTK_NOR_REG_PRG_CNT); 573 else 574 writel(prg_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT); 575 576 ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM, 577 prg_len * BITS_PER_BYTE); 578 if (ret) 579 return ret; 580 581 // fetch read data 582 reg_offset = 0; 583 if (op->data.dir == SPI_MEM_DATA_IN) { 584 for (i = op->data.nbytes - 1; i >= 0; i--, reg_offset++) { 585 reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset); 586 ((u8 *)(op->data.buf.in))[i] = readb(reg); 587 } 588 } 589 590 return 0; 591} 592 593static int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 594{ 595 struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master); 596 int ret; 597 598 if ((op->data.nbytes == 0) || 599 ((op->addr.nbytes != 3) && (op->addr.nbytes != 4))) 600 return mtk_nor_spi_mem_prg(sp, op); 601 602 if (op->data.dir == SPI_MEM_DATA_OUT) { 603 mtk_nor_set_addr(sp, op); 604 writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0); 605 if (op->data.nbytes == MTK_NOR_PP_SIZE) 606 return mtk_nor_pp_buffered(sp, op); 607 return mtk_nor_pp_unbuffered(sp, op); 608 } 609 610 if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) { 611 ret = mtk_nor_write_buffer_disable(sp); 612 if (ret < 0) 613 return ret; 614 mtk_nor_setup_bus(sp, op); 615 if (op->data.nbytes == 1) { 616 mtk_nor_set_addr(sp, op); 617 return mtk_nor_read_pio(sp, op); 618 } else { 619 return mtk_nor_read_dma(sp, op); 620 } 621 } 622 623 return mtk_nor_spi_mem_prg(sp, op); 624} 625 626static int mtk_nor_setup(struct spi_device *spi) 627{ 628 struct mtk_nor *sp = spi_controller_get_devdata(spi->master); 629 630 if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) { 631 dev_err(&spi->dev, "spi clock should be %u Hz.\n", 632 sp->spi_freq); 633 return -EINVAL; 634 } 635 spi->max_speed_hz = sp->spi_freq; 636 637 return 0; 638} 639 640static int mtk_nor_transfer_one_message(struct spi_controller *master, 641 struct spi_message *m) 642{ 643 struct mtk_nor *sp = spi_controller_get_devdata(master); 644 struct spi_transfer *t = NULL; 645 unsigned long trx_len = 0; 646 int stat = 0; 647 int reg_offset = MTK_NOR_REG_PRGDATA_MAX; 648 void __iomem *reg; 649 const u8 *txbuf; 650 u8 *rxbuf; 651 int i; 652 653 list_for_each_entry(t, &m->transfers, transfer_list) { 654 txbuf = t->tx_buf; 655 for (i = 0; i < t->len; i++, reg_offset--) { 656 reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset); 657 if (txbuf) 658 writeb(txbuf[i], reg); 659 else 660 writeb(0, reg); 661 } 662 trx_len += t->len; 663 } 664 665 writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT); 666 667 stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM, 668 trx_len * BITS_PER_BYTE); 669 if (stat < 0) 670 goto msg_done; 671 672 reg_offset = trx_len - 1; 673 list_for_each_entry(t, &m->transfers, transfer_list) { 674 rxbuf = t->rx_buf; 675 for (i = 0; i < t->len; i++, reg_offset--) { 676 reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset); 677 if (rxbuf) 678 rxbuf[i] = readb(reg); 679 } 680 } 681 682 m->actual_length = trx_len; 683msg_done: 684 m->status = stat; 685 spi_finalize_current_message(master); 686 687 return 0; 688} 689 690static void mtk_nor_disable_clk(struct mtk_nor *sp) 691{ 692 clk_disable_unprepare(sp->spi_clk); 693 clk_disable_unprepare(sp->ctlr_clk); 694 clk_disable_unprepare(sp->axi_clk); 695 clk_disable_unprepare(sp->axi_s_clk); 696} 697 698static int mtk_nor_enable_clk(struct mtk_nor *sp) 699{ 700 int ret; 701 702 ret = clk_prepare_enable(sp->spi_clk); 703 if (ret) 704 return ret; 705 706 ret = clk_prepare_enable(sp->ctlr_clk); 707 if (ret) { 708 clk_disable_unprepare(sp->spi_clk); 709 return ret; 710 } 711 712 ret = clk_prepare_enable(sp->axi_clk); 713 if (ret) { 714 clk_disable_unprepare(sp->spi_clk); 715 clk_disable_unprepare(sp->ctlr_clk); 716 return ret; 717 } 718 719 ret = clk_prepare_enable(sp->axi_s_clk); 720 if (ret) { 721 clk_disable_unprepare(sp->spi_clk); 722 clk_disable_unprepare(sp->ctlr_clk); 723 clk_disable_unprepare(sp->axi_clk); 724 return ret; 725 } 726 727 return 0; 728} 729 730static void mtk_nor_init(struct mtk_nor *sp) 731{ 732 writel(0, sp->base + MTK_NOR_REG_IRQ_EN); 733 writel(MTK_NOR_IRQ_MASK, sp->base + MTK_NOR_REG_IRQ_STAT); 734 735 writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP); 736 mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0); 737 mtk_nor_rmw(sp, MTK_NOR_REG_CFG3, 738 MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0); 739} 740 741static irqreturn_t mtk_nor_irq_handler(int irq, void *data) 742{ 743 struct mtk_nor *sp = data; 744 u32 irq_status, irq_enabled; 745 746 irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT); 747 irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN); 748 // write status back to clear interrupt 749 writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT); 750 751 if (!(irq_status & irq_enabled)) 752 return IRQ_NONE; 753 754 if (irq_status & MTK_NOR_IRQ_DMA) { 755 complete(&sp->op_done); 756 writel(0, sp->base + MTK_NOR_REG_IRQ_EN); 757 } 758 759 return IRQ_HANDLED; 760} 761 762static size_t mtk_max_msg_size(struct spi_device *spi) 763{ 764 return MTK_NOR_PRG_MAX_SIZE; 765} 766 767static const struct spi_controller_mem_ops mtk_nor_mem_ops = { 768 .adjust_op_size = mtk_nor_adjust_op_size, 769 .supports_op = mtk_nor_supports_op, 770 .exec_op = mtk_nor_exec_op 771}; 772 773static const struct mtk_nor_caps mtk_nor_caps_mt8173 = { 774 .dma_bits = 32, 775 .extra_dummy_bit = 0, 776}; 777 778static const struct mtk_nor_caps mtk_nor_caps_mt8186 = { 779 .dma_bits = 32, 780 .extra_dummy_bit = 1, 781}; 782 783static const struct mtk_nor_caps mtk_nor_caps_mt8192 = { 784 .dma_bits = 36, 785 .extra_dummy_bit = 0, 786}; 787 788static const struct of_device_id mtk_nor_match[] = { 789 { .compatible = "mediatek,mt8173-nor", .data = &mtk_nor_caps_mt8173 }, 790 { .compatible = "mediatek,mt8186-nor", .data = &mtk_nor_caps_mt8186 }, 791 { .compatible = "mediatek,mt8192-nor", .data = &mtk_nor_caps_mt8192 }, 792 { /* sentinel */ } 793}; 794MODULE_DEVICE_TABLE(of, mtk_nor_match); 795 796static int mtk_nor_probe(struct platform_device *pdev) 797{ 798 struct spi_controller *ctlr; 799 struct mtk_nor *sp; 800 struct mtk_nor_caps *caps; 801 void __iomem *base; 802 struct clk *spi_clk, *ctlr_clk, *axi_clk, *axi_s_clk; 803 int ret, irq; 804 805 base = devm_platform_ioremap_resource(pdev, 0); 806 if (IS_ERR(base)) 807 return PTR_ERR(base); 808 809 spi_clk = devm_clk_get(&pdev->dev, "spi"); 810 if (IS_ERR(spi_clk)) 811 return PTR_ERR(spi_clk); 812 813 ctlr_clk = devm_clk_get(&pdev->dev, "sf"); 814 if (IS_ERR(ctlr_clk)) 815 return PTR_ERR(ctlr_clk); 816 817 axi_clk = devm_clk_get_optional(&pdev->dev, "axi"); 818 if (IS_ERR(axi_clk)) 819 return PTR_ERR(axi_clk); 820 821 axi_s_clk = devm_clk_get_optional(&pdev->dev, "axi_s"); 822 if (IS_ERR(axi_s_clk)) 823 return PTR_ERR(axi_s_clk); 824 825 caps = (struct mtk_nor_caps *)of_device_get_match_data(&pdev->dev); 826 827 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(caps->dma_bits)); 828 if (ret) { 829 dev_err(&pdev->dev, "failed to set dma mask(%u)\n", caps->dma_bits); 830 return ret; 831 } 832 833 ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp)); 834 if (!ctlr) { 835 dev_err(&pdev->dev, "failed to allocate spi controller\n"); 836 return -ENOMEM; 837 } 838 839 ctlr->bits_per_word_mask = SPI_BPW_MASK(8); 840 ctlr->dev.of_node = pdev->dev.of_node; 841 ctlr->max_message_size = mtk_max_msg_size; 842 ctlr->mem_ops = &mtk_nor_mem_ops; 843 ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; 844 ctlr->num_chipselect = 1; 845 ctlr->setup = mtk_nor_setup; 846 ctlr->transfer_one_message = mtk_nor_transfer_one_message; 847 ctlr->auto_runtime_pm = true; 848 849 dev_set_drvdata(&pdev->dev, ctlr); 850 851 sp = spi_controller_get_devdata(ctlr); 852 sp->base = base; 853 sp->has_irq = false; 854 sp->wbuf_en = false; 855 sp->ctlr = ctlr; 856 sp->dev = &pdev->dev; 857 sp->spi_clk = spi_clk; 858 sp->ctlr_clk = ctlr_clk; 859 sp->axi_clk = axi_clk; 860 sp->axi_s_clk = axi_s_clk; 861 sp->caps = caps; 862 sp->high_dma = caps->dma_bits > 32; 863 sp->buffer = dmam_alloc_coherent(&pdev->dev, 864 MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN, 865 &sp->buffer_dma, GFP_KERNEL); 866 if (!sp->buffer) 867 return -ENOMEM; 868 869 if ((uintptr_t)sp->buffer & MTK_NOR_DMA_ALIGN_MASK) { 870 dev_err(sp->dev, "misaligned allocation of internal buffer.\n"); 871 return -ENOMEM; 872 } 873 874 ret = mtk_nor_enable_clk(sp); 875 if (ret < 0) 876 return ret; 877 878 sp->spi_freq = clk_get_rate(sp->spi_clk); 879 880 mtk_nor_init(sp); 881 882 irq = platform_get_irq_optional(pdev, 0); 883 884 if (irq < 0) { 885 dev_warn(sp->dev, "IRQ not available."); 886 } else { 887 ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0, 888 pdev->name, sp); 889 if (ret < 0) { 890 dev_warn(sp->dev, "failed to request IRQ."); 891 } else { 892 init_completion(&sp->op_done); 893 sp->has_irq = true; 894 } 895 } 896 897 pm_runtime_set_autosuspend_delay(&pdev->dev, -1); 898 pm_runtime_use_autosuspend(&pdev->dev); 899 pm_runtime_set_active(&pdev->dev); 900 pm_runtime_enable(&pdev->dev); 901 pm_runtime_get_noresume(&pdev->dev); 902 903 ret = devm_spi_register_controller(&pdev->dev, ctlr); 904 if (ret < 0) 905 goto err_probe; 906 907 pm_runtime_mark_last_busy(&pdev->dev); 908 pm_runtime_put_autosuspend(&pdev->dev); 909 910 dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq); 911 912 return 0; 913 914err_probe: 915 pm_runtime_disable(&pdev->dev); 916 pm_runtime_set_suspended(&pdev->dev); 917 pm_runtime_dont_use_autosuspend(&pdev->dev); 918 919 mtk_nor_disable_clk(sp); 920 921 return ret; 922} 923 924static int mtk_nor_remove(struct platform_device *pdev) 925{ 926 struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev); 927 struct mtk_nor *sp = spi_controller_get_devdata(ctlr); 928 929 pm_runtime_disable(&pdev->dev); 930 pm_runtime_set_suspended(&pdev->dev); 931 pm_runtime_dont_use_autosuspend(&pdev->dev); 932 933 mtk_nor_disable_clk(sp); 934 935 return 0; 936} 937 938static int __maybe_unused mtk_nor_runtime_suspend(struct device *dev) 939{ 940 struct spi_controller *ctlr = dev_get_drvdata(dev); 941 struct mtk_nor *sp = spi_controller_get_devdata(ctlr); 942 943 mtk_nor_disable_clk(sp); 944 945 return 0; 946} 947 948static int __maybe_unused mtk_nor_runtime_resume(struct device *dev) 949{ 950 struct spi_controller *ctlr = dev_get_drvdata(dev); 951 struct mtk_nor *sp = spi_controller_get_devdata(ctlr); 952 953 return mtk_nor_enable_clk(sp); 954} 955 956static int __maybe_unused mtk_nor_suspend(struct device *dev) 957{ 958 return pm_runtime_force_suspend(dev); 959} 960 961static int __maybe_unused mtk_nor_resume(struct device *dev) 962{ 963 struct spi_controller *ctlr = dev_get_drvdata(dev); 964 struct mtk_nor *sp = spi_controller_get_devdata(ctlr); 965 int ret; 966 967 ret = pm_runtime_force_resume(dev); 968 if (ret) 969 return ret; 970 971 mtk_nor_init(sp); 972 973 return 0; 974} 975 976static const struct dev_pm_ops mtk_nor_pm_ops = { 977 SET_RUNTIME_PM_OPS(mtk_nor_runtime_suspend, 978 mtk_nor_runtime_resume, NULL) 979 SET_SYSTEM_SLEEP_PM_OPS(mtk_nor_suspend, mtk_nor_resume) 980}; 981 982static struct platform_driver mtk_nor_driver = { 983 .driver = { 984 .name = DRIVER_NAME, 985 .of_match_table = mtk_nor_match, 986 .pm = &mtk_nor_pm_ops, 987 }, 988 .probe = mtk_nor_probe, 989 .remove = mtk_nor_remove, 990}; 991 992module_platform_driver(mtk_nor_driver); 993 994MODULE_DESCRIPTION("Mediatek SPI NOR controller driver"); 995MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>"); 996MODULE_LICENSE("GPL v2"); 997MODULE_ALIAS("platform:" DRIVER_NAME);