// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L DMA Controller Driver
 *
 * Based on imx-dma.c
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

enum rz_dmac_prep_type {
	RZ_DMAC_DESC_MEMCPY,
	RZ_DMAC_DESC_SLAVE_SG,
};

struct rz_lmdesc {
	u32 header;
	u32 sa;
	u32 da;
	u32 tb;
	u32 chcfg;
	u32 chitvl;
	u32 chext;
	u32 nxla;
};

struct rz_dmac_desc {
	struct virt_dma_desc vd;
	dma_addr_t src;
	dma_addr_t dest;
	size_t len;
	struct list_head node;
	enum dma_transfer_direction direction;
	enum rz_dmac_prep_type type;
	/* For slave sg */
	struct scatterlist *sg;
	unsigned int sgcount;
};

#define to_rz_dmac_desc(d)	container_of(d, struct rz_dmac_desc, vd)

struct rz_dmac_chan {
	struct virt_dma_chan vc;
	void __iomem *ch_base;
	void __iomem *ch_cmn_base;
	unsigned int index;
	int irq;
	struct rz_dmac_desc *desc;
	int descs_allocated;

	enum dma_slave_buswidth src_word_size;
	enum dma_slave_buswidth dst_word_size;
	dma_addr_t src_per_address;
	dma_addr_t dst_per_address;

	u32 chcfg;
	u32 chctrl;
	int mid_rid;

	struct list_head ld_free;
	struct list_head ld_queue;
	struct list_head ld_active;

	struct {
		struct rz_lmdesc *base;
		struct rz_lmdesc *head;
		struct rz_lmdesc *tail;
		dma_addr_t base_dma;
	} lmdesc;
};

#define to_rz_dmac_chan(c)	container_of(c, struct rz_dmac_chan, vc.chan)

struct rz_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *base;
	void __iomem *ext_base;

	unsigned int n_channels;
	struct rz_dmac_chan *channels;

	DECLARE_BITMAP(modules, 1024);
};

#define to_rz_dmac(d)	container_of(d, struct rz_dmac, engine)

/*
 * -----------------------------------------------------------------------------
 * Registers
 */

#define CHSTAT				0x0024
#define CHCTRL				0x0028
#define CHCFG				0x002c
#define NXLA				0x0038

#define DCTRL				0x0000

#define EACH_CHANNEL_OFFSET		0x0040
#define CHANNEL_0_7_OFFSET		0x0000
#define CHANNEL_0_7_COMMON_BASE		0x0300
#define CHANNEL_8_15_OFFSET		0x0400
#define CHANNEL_8_15_COMMON_BASE	0x0700

#define CHSTAT_ER			BIT(4)
#define CHSTAT_EN			BIT(0)

#define CHCTRL_CLRINTMSK		BIT(17)
#define CHCTRL_CLRSUS			BIT(9)
#define CHCTRL_CLRTC			BIT(6)
#define CHCTRL_CLREND			BIT(5)
#define CHCTRL_CLRRQ			BIT(4)
#define CHCTRL_SWRST			BIT(3)
#define CHCTRL_STG			BIT(2)
#define CHCTRL_CLREN			BIT(1)
#define CHCTRL_SETEN			BIT(0)
#define CHCTRL_DEFAULT			(CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \
					 CHCTRL_CLRTC | CHCTRL_CLREND | \
					 CHCTRL_CLRRQ | CHCTRL_SWRST | \
					 CHCTRL_CLREN)

#define CHCFG_DMS			BIT(31)
#define CHCFG_DEM			BIT(24)
#define CHCFG_DAD			BIT(21)
#define CHCFG_SAD			BIT(20)
#define CHCFG_REQD			BIT(3)
#define CHCFG_SEL(bits)			((bits) & 0x07)
#define CHCFG_MEM_COPY			(0x80400008)
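
/*
 * The CHCFG_FILL_* helpers below pack pre-encoded field values into their
 * CHCFG bit positions: DDS/SDS take the bus-width encoding produced by
 * rz_dmac_ds_to_val_mapping(), while TM/AM/LVL/HIEN take the CHCFG bits
 * carried in the DT dma-spec cell (see rz_dmac_chan_filter()).
 */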
#define CHCFG_FILL_DDS(a)		(((a) << 16) & GENMASK(19, 16))
#define CHCFG_FILL_SDS(a)		(((a) << 12) & GENMASK(15, 12))
#define CHCFG_FILL_TM(a)		(((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a)		(((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a)		(((a) & BIT(1)) << 5)
#define CHCFG_FILL_HIEN(a)		(((a) & BIT(0)) << 5)

#define MID_RID_MASK			GENMASK(9, 0)
#define CHCFG_MASK			GENMASK(15, 10)
#define CHCFG_DS_INVALID		0xFF
#define DCTRL_LVINT			BIT(1)
#define DCTRL_PR			BIT(0)
#define DCTRL_DEFAULT			(DCTRL_LVINT | DCTRL_PR)

/* LINK MODE DESCRIPTOR */
#define HEADER_LV			BIT(0)

#define RZ_DMAC_MAX_CHAN_DESCRIPTORS	16
#define RZ_DMAC_MAX_CHANNELS		16
#define DMAC_NR_LMDESC			64

/*
 * -----------------------------------------------------------------------------
 * Device access
 */

static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val,
			   unsigned int offset)
{
	writel(val, dmac->base + offset);
}

static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val,
			       unsigned int offset)
{
	writel(val, dmac->ext_base + offset);
}

static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset)
{
	return readl(dmac->ext_base + offset);
}

static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val,
			      unsigned int offset, int which)
{
	if (which)
		writel(val, channel->ch_base + offset);
	else
		writel(val, channel->ch_cmn_base + offset);
}

static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel,
			    unsigned int offset, int which)
{
	if (which)
		return readl(channel->ch_base + offset);
	else
		return readl(channel->ch_cmn_base + offset);
}

/*
 * -----------------------------------------------------------------------------
 * Initialization
 */

static void rz_lmdesc_setup(struct rz_dmac_chan *channel,
			    struct rz_lmdesc *lmdesc)
{
	u32 nxla;

	channel->lmdesc.base = lmdesc;
	channel->lmdesc.head = lmdesc;
	channel->lmdesc.tail = lmdesc;
	nxla = channel->lmdesc.base_dma;
	while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) {
		lmdesc->header = 0;
		nxla += sizeof(*lmdesc);
		lmdesc->nxla = nxla;
		lmdesc++;
	}

	lmdesc->header = 0;
	lmdesc->nxla = channel->lmdesc.base_dma;
}
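
/*
 * rz_lmdesc_setup() links the DMAC_NR_LMDESC link-mode descriptors into a
 * ring: each nxla holds the DMA address of the next descriptor and the
 * last entry points back at the base. head and tail then walk this ring
 * as descriptors are consumed and refilled.
 */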

/*
 * -----------------------------------------------------------------------------
 * Descriptors preparation
 */

static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel)
{
	struct rz_lmdesc *lmdesc = channel->lmdesc.head;

	while (!(lmdesc->header & HEADER_LV)) {
		lmdesc->header = 0;
		lmdesc++;
		if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}
	channel->lmdesc.head = lmdesc;
}

static void rz_dmac_enable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;
	u32 nxla;
	u32 chctrl;
	u32 chstat;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);

	rz_dmac_lmdesc_recycle(channel);

	nxla = channel->lmdesc.base_dma +
		(sizeof(struct rz_lmdesc) * (channel->lmdesc.head -
					     channel->lmdesc.base));

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (!(chstat & CHSTAT_EN)) {
		chctrl = (channel->chctrl | CHCTRL_SETEN);
		rz_dmac_ch_writel(channel, nxla, NXLA, 1);
		rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1);
		rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1);
		rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1);
	}

	local_irq_restore(flags);
}

static void rz_dmac_disable_hw(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	unsigned long flags;

	dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index);

	local_irq_save(flags);
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
	local_irq_restore(flags);
}

static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars)
{
	/* each 32-bit DMARS register holds the MID/RID of two channels */
	u32 dmars_offset = (nr / 2) * 4;
	u32 shift = (nr % 2) * 16;
	u32 dmars32;

	dmars32 = rz_dmac_ext_readl(dmac, dmars_offset);
	dmars32 &= ~(0xffff << shift);
	dmars32 |= dmars << shift;

	rz_dmac_ext_writel(dmac, dmars32, dmars_offset);
}

static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.tail;
	struct rz_dmac_desc *d = channel->desc;
	u32 chcfg = CHCFG_MEM_COPY;

	/* prepare descriptor */
	lmdesc->sa = d->src;
	lmdesc->da = d->dest;
	lmdesc->tb = d->len;
	lmdesc->chcfg = chcfg;
	lmdesc->chitvl = 0;
	lmdesc->chext = 0;
	lmdesc->header = HEADER_LV;

	rz_dmac_set_dmars_register(dmac, channel->index, 0);

	channel->chcfg = chcfg;
	channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
}
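
/*
 * For slave transfers one link-mode descriptor is filled per scatterlist
 * entry. Every entry but the last keeps CHCFG_DEM set, masking the
 * per-descriptor end interrupt so that only the final descriptor of the
 * chain signals completion.
 */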
static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *d = channel->desc;
	struct scatterlist *sg, *sgl = d->sg;
	struct rz_lmdesc *lmdesc;
	unsigned int i, sg_len = d->sgcount;

	channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS;

	if (d->direction == DMA_DEV_TO_MEM) {
		channel->chcfg |= CHCFG_SAD;
		channel->chcfg &= ~CHCFG_REQD;
	} else {
		channel->chcfg |= CHCFG_DAD | CHCFG_REQD;
	}

	lmdesc = channel->lmdesc.tail;

	for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) {
		if (d->direction == DMA_DEV_TO_MEM) {
			lmdesc->sa = channel->src_per_address;
			lmdesc->da = sg_dma_address(sg);
		} else {
			lmdesc->sa = sg_dma_address(sg);
			lmdesc->da = channel->dst_per_address;
		}

		lmdesc->tb = sg_dma_len(sg);
		lmdesc->chitvl = 0;
		lmdesc->chext = 0;
		if (i == (sg_len - 1)) {
			lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM);
			lmdesc->header = HEADER_LV;
		} else {
			lmdesc->chcfg = channel->chcfg;
			lmdesc->header = HEADER_LV;
		}
		if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC))
			lmdesc = channel->lmdesc.base;
	}

	channel->lmdesc.tail = lmdesc;

	rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
	channel->chctrl = CHCTRL_SETEN;
}

static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan)
{
	struct rz_dmac_desc *d = chan->desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	list_del(&vd->node);

	switch (d->type) {
	case RZ_DMAC_DESC_MEMCPY:
		rz_dmac_prepare_desc_for_memcpy(chan);
		break;

	case RZ_DMAC_DESC_SLAVE_SG:
		rz_dmac_prepare_descs_for_slave_sg(chan);
		break;

	default:
		return -EINVAL;
	}

	rz_dmac_enable_hw(chan);

	return 0;
}

/*
 * -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rz_dmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);

	while (channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) {
		struct rz_dmac_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;

		list_add_tail(&desc->node, &channel->ld_free);
		channel->descs_allocated++;
	}

	if (!channel->descs_allocated)
		return -ENOMEM;

	return channel->descs_allocated;
}

static void rz_dmac_free_chan_resources(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_lmdesc *lmdesc = channel->lmdesc.base;
	struct rz_dmac_desc *desc, *_desc;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&channel->vc.lock, flags);

	for (i = 0; i < DMAC_NR_LMDESC; i++)
		lmdesc[i].header = 0;

	rz_dmac_disable_hw(channel);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);

	if (channel->mid_rid >= 0) {
		clear_bit(channel->mid_rid, dmac->modules);
		channel->mid_rid = -EINVAL;
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);

	list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) {
		kfree(desc);
		channel->descs_allocated--;
	}

	INIT_LIST_HEAD(&channel->ld_free);
	vchan_free_chan_resources(&channel->vc);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;

	dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n",
		__func__, channel->index, &src, &dest, len);

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	desc->type = RZ_DMAC_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}

static struct dma_async_tx_descriptor *
rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		      unsigned int sg_len,
		      enum dma_transfer_direction direction,
		      unsigned long flags, void *context)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac_desc *desc;
	struct scatterlist *sg;
	int dma_length = 0;
	int i = 0;

	if (list_empty(&channel->ld_free))
		return NULL;

	desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	desc->type = RZ_DMAC_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;

	if (direction == DMA_DEV_TO_MEM)
		desc->src = channel->src_per_address;
	else
		desc->dest = channel->dst_per_address;

	list_move_tail(channel->ld_free.next, &channel->ld_queue);
	return vchan_tx_prep(&channel->vc, &desc->vd, flags);
}
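
/*
 * terminate_all only moves descriptors back onto ld_free; their memory
 * stays allocated until free_chan_resources(), matching the placeholder
 * vc.desc_free callback rz_dmac_virt_desc_free().
 */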
static int rz_dmac_terminate_all(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	rz_dmac_disable_hw(channel);
	spin_lock_irqsave(&channel->vc.lock, flags);
	list_splice_tail_init(&channel->ld_active, &channel->ld_free);
	list_splice_tail_init(&channel->ld_queue, &channel->ld_free);
	spin_unlock_irqrestore(&channel->vc.lock, flags);
	vchan_get_all_descriptors(&channel->vc, &head);
	vchan_dma_desc_free_list(&channel->vc, &head);

	return 0;
}

static void rz_dmac_issue_pending(struct dma_chan *chan)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct rz_dmac_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue,
					struct rz_dmac_desc, node);
		channel->desc = desc;
		if (vchan_issue_pending(&channel->vc)) {
			if (rz_dmac_xfer_desc(channel) < 0)
				dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n",
					 channel->index);
			else
				list_move_tail(channel->ld_queue.next,
					       &channel->ld_active);
		}
	}

	spin_unlock_irqrestore(&channel->vc.lock, flags);
}

static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds)
{
	u8 i;
	static const enum dma_slave_buswidth ds_lut[] = {
		DMA_SLAVE_BUSWIDTH_1_BYTE,
		DMA_SLAVE_BUSWIDTH_2_BYTES,
		DMA_SLAVE_BUSWIDTH_4_BYTES,
		DMA_SLAVE_BUSWIDTH_8_BYTES,
		DMA_SLAVE_BUSWIDTH_16_BYTES,
		DMA_SLAVE_BUSWIDTH_32_BYTES,
		DMA_SLAVE_BUSWIDTH_64_BYTES,
		DMA_SLAVE_BUSWIDTH_128_BYTES,
	};

	for (i = 0; i < ARRAY_SIZE(ds_lut); i++) {
		if (ds_lut[i] == ds)
			return i;
	}

	return CHCFG_DS_INVALID;
}

static int rz_dmac_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	u32 val;

	channel->src_per_address = config->src_addr;
	channel->src_word_size = config->src_addr_width;
	channel->dst_per_address = config->dst_addr;
	channel->dst_word_size = config->dst_addr_width;

	val = rz_dmac_ds_to_val_mapping(config->dst_addr_width);
	if (val == CHCFG_DS_INVALID)
		return -EINVAL;

	channel->chcfg |= CHCFG_FILL_DDS(val);

	val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
	if (val == CHCFG_DS_INVALID)
		return -EINVAL;

	channel->chcfg |= CHCFG_FILL_SDS(val);

	return 0;
}

static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd)
{
	/*
	 * Placeholder: descriptors are allocated in alloc_chan_resources
	 * and freed in free_chan_resources. The free/queue/active lists
	 * recycle them, so no memory is allocated or freed during DMA
	 * read/write.
	 */
}
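
/*
 * Interrupt handling is split in two stages: the hard handler acknowledges
 * the channel status (and resets the channel on CHSTAT_ER), then wakes the
 * threaded handler, which completes the finished cookie and starts the
 * next queued descriptor under the channel lock.
 */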

/*
 * -----------------------------------------------------------------------------
 * IRQ handling
 */

static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel)
{
	struct dma_chan *chan = &channel->vc.chan;
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	u32 chstat, chctrl;

	chstat = rz_dmac_ch_readl(channel, CHSTAT, 1);
	if (chstat & CHSTAT_ER) {
		dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n",
			channel->index, chstat);
		rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);
		goto done;
	}

	chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1);
	rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1);
done:
	return;
}

static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;

	if (channel) {
		rz_dmac_irq_handle_channel(channel);
		return IRQ_WAKE_THREAD;
	}
	/* handle DMAERR irq */
	return IRQ_HANDLED;
}

static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id)
{
	struct rz_dmac_chan *channel = dev_id;
	struct rz_dmac_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(&channel->vc.lock, flags);

	if (list_empty(&channel->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}

	desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node);
	vchan_cookie_complete(&desc->vd);
	list_move_tail(channel->ld_active.next, &channel->ld_free);
	if (!list_empty(&channel->ld_queue)) {
		desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc,
					node);
		channel->desc = desc;
		if (rz_dmac_xfer_desc(channel) == 0)
			list_move_tail(channel->ld_queue.next, &channel->ld_active);
	}
out:
	spin_unlock_irqrestore(&channel->vc.lock, flags);

	return IRQ_HANDLED;
}

/*
 * -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
	struct rz_dmac_chan *channel = to_rz_dmac_chan(chan);
	struct rz_dmac *dmac = to_rz_dmac(chan->device);
	struct of_phandle_args *dma_spec = arg;
	u32 ch_cfg;

	channel->mid_rid = dma_spec->args[0] & MID_RID_MASK;
	ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10;
	channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) |
			 CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg);

	return !test_and_set_bit(channel->mid_rid, dmac->modules);
}

static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	/* Only slave DMA channels can be allocated via DT */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
}
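
/*
 * Consumers select a request line with a single dma-spec cell: MID/RID in
 * bits 9:0 and the CHCFG TM/AM/LVL/HIEN configuration in bits 15:10. An
 * illustrative consumer node (made-up values, not taken from any binding
 * document):
 *
 *	serial@10064000 {
 *		...
 *		dmas = <&dmac 0x255>, <&dmac 0x256>;
 *		dma-names = "tx", "rx";
 *	};
 */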

/*
 * -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rz_dmac_chan_probe(struct rz_dmac *dmac,
			      struct rz_dmac_chan *channel,
			      unsigned int index)
{
	struct platform_device *pdev = to_platform_device(dmac->dev);
	struct rz_lmdesc *lmdesc;
	char pdev_irqname[5];
	char *irqname;
	int ret;

	channel->index = index;
	channel->mid_rid = -EINVAL;

	/* Request the channel interrupt. */
	sprintf(pdev_irqname, "ch%u", index);
	channel->irq = platform_get_irq_byname(pdev, pdev_irqname);
	if (channel->irq < 0)
		return channel->irq;

	irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
				 dev_name(dmac->dev), index);
	if (!irqname)
		return -ENOMEM;

	ret = devm_request_threaded_irq(dmac->dev, channel->irq,
					rz_dmac_irq_handler,
					rz_dmac_irq_handler_thread, 0,
					irqname, channel);
	if (ret) {
		dev_err(dmac->dev, "failed to request IRQ %u (%d)\n",
			channel->irq, ret);
		return ret;
	}

	/* Set io base address for each channel */
	if (index < 8) {
		channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET +
			EACH_CHANNEL_OFFSET * index;
		channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE;
	} else {
		channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET +
			EACH_CHANNEL_OFFSET * (index - 8);
		channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE;
	}

	/* Allocate descriptors */
	lmdesc = dma_alloc_coherent(&pdev->dev,
				    sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				    &channel->lmdesc.base_dma, GFP_KERNEL);
	if (!lmdesc) {
		dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n");
		return -ENOMEM;
	}
	rz_lmdesc_setup(channel, lmdesc);

	/* Initialize register for each channel */
	rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1);

	channel->vc.desc_free = rz_dmac_virt_desc_free;
	vchan_init(&channel->vc, &dmac->engine);
	INIT_LIST_HEAD(&channel->ld_queue);
	INIT_LIST_HEAD(&channel->ld_free);
	INIT_LIST_HEAD(&channel->ld_active);

	return 0;
}

static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
	struct device_node *np = dev->of_node;
	int ret;

	ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
	if (ret < 0) {
		dev_err(dev, "unable to read dma-channels property\n");
		return ret;
	}

	if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) {
		dev_err(dev, "invalid number of channels %u\n", dmac->n_channels);
		return -EINVAL;
	}

	return 0;
}
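
/*
 * Probe order: map the channel and DMARS register blocks, request the
 * error interrupt, power the controller up via runtime PM, set up each
 * channel, then register the OF provider and the dmaengine device. The
 * error path unwinds in the reverse order.
 */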
static int rz_dmac_probe(struct platform_device *pdev)
{
	const char *irqname = "error";
	struct dma_device *engine;
	struct rz_dmac *dmac;
	int channel_num;
	unsigned int i;
	int ret;
	int irq;

	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->dev = &pdev->dev;
	platform_set_drvdata(pdev, dmac);

	ret = rz_dmac_parse_of(&pdev->dev, dmac);
	if (ret < 0)
		return ret;

	dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
				      sizeof(*dmac->channels), GFP_KERNEL);
	if (!dmac->channels)
		return -ENOMEM;

	/* Request resources */
	dmac->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dmac->base))
		return PTR_ERR(dmac->base);

	dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(dmac->ext_base))
		return PTR_ERR(dmac->ext_base);

	/* Register interrupt handler for error */
	irq = platform_get_irq_byname(pdev, irqname);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0,
			       irqname, NULL);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
			irq, ret);
		return ret;
	}

	/* Initialize the channels. */
	INIT_LIST_HEAD(&dmac->engine.channels);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm_disable;
	}

	for (i = 0; i < dmac->n_channels; i++) {
		ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i);
		if (ret < 0)
			goto err;
	}

	/* Register the DMAC as a DMA provider for DT. */
	ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate,
					 NULL);
	if (ret < 0)
		goto err;

	/* Register the DMA engine device. */
	engine = &dmac->engine;
	dma_cap_set(DMA_SLAVE, engine->cap_mask);
	dma_cap_set(DMA_MEMCPY, engine->cap_mask);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL);
	rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL);

	engine->dev = &pdev->dev;

	engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources;
	engine->device_free_chan_resources = rz_dmac_free_chan_resources;
	engine->device_tx_status = dma_cookie_status;
	engine->device_prep_slave_sg = rz_dmac_prep_slave_sg;
	engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy;
	engine->device_config = rz_dmac_config;
	engine->device_terminate_all = rz_dmac_terminate_all;
	engine->device_issue_pending = rz_dmac_issue_pending;

	engine->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma_set_max_seg_size(engine->dev, U32_MAX);

	ret = dma_async_device_register(engine);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register\n");
		goto dma_register_err;
	}
	return 0;

dma_register_err:
	of_dma_controller_free(pdev->dev.of_node);
err:
	/*
	 * Free the lmdesc memory of every fully initialized channel; a
	 * channel that failed in rz_dmac_chan_probe() has not reached its
	 * dma_alloc_coherent() yet, so channels 0..i-1 are exactly the
	 * ones holding an allocation.
	 */
	channel_num = i;
	for (i = 0; i < channel_num; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}

	pm_runtime_put(&pdev->dev);
err_pm_disable:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

static int rz_dmac_remove(struct platform_device *pdev)
{
	struct rz_dmac *dmac = platform_get_drvdata(pdev);
	unsigned int i;

	for (i = 0; i < dmac->n_channels; i++) {
		struct rz_dmac_chan *channel = &dmac->channels[i];

		dma_free_coherent(&pdev->dev,
				  sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC,
				  channel->lmdesc.base,
				  channel->lmdesc.base_dma);
	}
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&dmac->engine);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct of_device_id of_rz_dmac_match[] = {
	{ .compatible = "renesas,rz-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rz_dmac_match);

static struct platform_driver rz_dmac_driver = {
	.driver		= {
		.name	= "rz-dmac",
		.of_match_table = of_rz_dmac_match,
	},
	.probe		= rz_dmac_probe,
	.remove		= rz_dmac_remove,
};

module_platform_driver(rz_dmac_driver);

MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver");
MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
MODULE_LICENSE("GPL v2");