s3c24xx-dma.c (38540B)
1// SPDX-License-Identifier: GPL-2.0-or-later 2/* 3 * S3C24XX DMA handling 4 * 5 * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de> 6 * 7 * based on amba-pl08x.c 8 * 9 * Copyright (c) 2006 ARM Ltd. 10 * Copyright (c) 2010 ST-Ericsson SA 11 * 12 * Author: Peter Pearse <peter.pearse@arm.com> 13 * Author: Linus Walleij <linus.walleij@stericsson.com> 14 * 15 * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals 16 * that can be routed to any of the 4 to 8 hardware-channels. 17 * 18 * Therefore on these DMA controllers the number of channels 19 * and the number of incoming DMA signals are two totally different things. 20 * It is usually not possible to theoretically handle all physical signals, 21 * so a multiplexing scheme with possible denial of use is necessary. 22 * 23 * Open items: 24 * - bursts 25 */ 26 27#include <linux/platform_device.h> 28#include <linux/types.h> 29#include <linux/dmaengine.h> 30#include <linux/dma-mapping.h> 31#include <linux/interrupt.h> 32#include <linux/clk.h> 33#include <linux/module.h> 34#include <linux/mod_devicetable.h> 35#include <linux/slab.h> 36#include <linux/platform_data/dma-s3c24xx.h> 37 38#include "dmaengine.h" 39#include "virt-dma.h" 40 41#define MAX_DMA_CHANNELS 8 42 43#define S3C24XX_DISRC 0x00 44#define S3C24XX_DISRCC 0x04 45#define S3C24XX_DISRCC_INC_INCREMENT 0 46#define S3C24XX_DISRCC_INC_FIXED BIT(0) 47#define S3C24XX_DISRCC_LOC_AHB 0 48#define S3C24XX_DISRCC_LOC_APB BIT(1) 49 50#define S3C24XX_DIDST 0x08 51#define S3C24XX_DIDSTC 0x0c 52#define S3C24XX_DIDSTC_INC_INCREMENT 0 53#define S3C24XX_DIDSTC_INC_FIXED BIT(0) 54#define S3C24XX_DIDSTC_LOC_AHB 0 55#define S3C24XX_DIDSTC_LOC_APB BIT(1) 56#define S3C24XX_DIDSTC_INT_TC0 0 57#define S3C24XX_DIDSTC_INT_RELOAD BIT(2) 58 59#define S3C24XX_DCON 0x10 60 61#define S3C24XX_DCON_TC_MASK 0xfffff 62#define S3C24XX_DCON_DSZ_BYTE (0 << 20) 63#define S3C24XX_DCON_DSZ_HALFWORD (1 << 20) 64#define S3C24XX_DCON_DSZ_WORD (2 << 20) 65#define S3C24XX_DCON_DSZ_MASK (3 << 20) 66#define S3C24XX_DCON_DSZ_SHIFT 20 67#define S3C24XX_DCON_AUTORELOAD 0 68#define S3C24XX_DCON_NORELOAD BIT(22) 69#define S3C24XX_DCON_HWTRIG BIT(23) 70#define S3C24XX_DCON_HWSRC_SHIFT 24 71#define S3C24XX_DCON_SERV_SINGLE 0 72#define S3C24XX_DCON_SERV_WHOLE BIT(27) 73#define S3C24XX_DCON_TSZ_UNIT 0 74#define S3C24XX_DCON_TSZ_BURST4 BIT(28) 75#define S3C24XX_DCON_INT BIT(29) 76#define S3C24XX_DCON_SYNC_PCLK 0 77#define S3C24XX_DCON_SYNC_HCLK BIT(30) 78#define S3C24XX_DCON_DEMAND 0 79#define S3C24XX_DCON_HANDSHAKE BIT(31) 80 81#define S3C24XX_DSTAT 0x14 82#define S3C24XX_DSTAT_STAT_BUSY BIT(20) 83#define S3C24XX_DSTAT_CURRTC_MASK 0xfffff 84 85#define S3C24XX_DMASKTRIG 0x20 86#define S3C24XX_DMASKTRIG_SWTRIG BIT(0) 87#define S3C24XX_DMASKTRIG_ON BIT(1) 88#define S3C24XX_DMASKTRIG_STOP BIT(2) 89 90#define S3C24XX_DMAREQSEL 0x24 91#define S3C24XX_DMAREQSEL_HW BIT(0) 92 93/* 94 * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel 95 * for a DMA source. Instead only specific channels are valid. 96 * All of these SoCs have 4 physical channels and the number of request 97 * source bits is 3. Additionally we also need 1 bit to mark the channel 98 * as valid. 99 * Therefore we separate the chansel element of the channel data into 4 100 * parts of 4 bits each, to hold the information if the channel is valid 101 * and the hw request source to use. 102 * 103 * Example: 104 * SDI is valid on channels 0, 2 and 3 - with varying hw request sources. 105 * For it the chansel field would look like 106 * 107 * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1 108 * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2 109 * ((BIT(3) | 2) << 0 * 4) // channel 0, with request source 2 110 */ 111#define S3C24XX_CHANSEL_WIDTH 4 112#define S3C24XX_CHANSEL_VALID BIT(3) 113#define S3C24XX_CHANSEL_REQ_MASK 7 114 115/* 116 * struct soc_data - vendor-specific config parameters for individual SoCs 117 * @stride: spacing between the registers of each channel 118 * @has_reqsel: does the controller use the newer requestselection mechanism 119 * @has_clocks: are controllable dma-clocks present 120 */ 121struct soc_data { 122 int stride; 123 bool has_reqsel; 124 bool has_clocks; 125}; 126 127/* 128 * enum s3c24xx_dma_chan_state - holds the virtual channel states 129 * @S3C24XX_DMA_CHAN_IDLE: the channel is idle 130 * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport 131 * channel and is running a transfer on it 132 * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport 133 * channel to become available (only pertains to memcpy channels) 134 */ 135enum s3c24xx_dma_chan_state { 136 S3C24XX_DMA_CHAN_IDLE, 137 S3C24XX_DMA_CHAN_RUNNING, 138 S3C24XX_DMA_CHAN_WAITING, 139}; 140 141/* 142 * struct s3c24xx_sg - structure containing data per sg 143 * @src_addr: src address of sg 144 * @dst_addr: dst address of sg 145 * @len: transfer len in bytes 146 * @node: node for txd's dsg_list 147 */ 148struct s3c24xx_sg { 149 dma_addr_t src_addr; 150 dma_addr_t dst_addr; 151 size_t len; 152 struct list_head node; 153}; 154 155/* 156 * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor 157 * @vd: virtual DMA descriptor 158 * @dsg_list: list of children sg's 159 * @at: sg currently being transfered 160 * @width: transfer width 161 * @disrcc: value for source control register 162 * @didstc: value for destination control register 163 * @dcon: base value for dcon register 164 * @cyclic: indicate cyclic transfer 165 */ 166struct s3c24xx_txd { 167 struct virt_dma_desc vd; 168 struct list_head dsg_list; 169 struct list_head *at; 170 u8 width; 171 u32 disrcc; 172 u32 didstc; 173 u32 dcon; 174 bool cyclic; 175}; 176 177struct s3c24xx_dma_chan; 178 179/* 180 * struct s3c24xx_dma_phy - holder for the physical channels 181 * @id: physical index to this channel 182 * @valid: does the channel have all required elements 183 * @base: virtual memory base (remapped) for the this channel 184 * @irq: interrupt for this channel 185 * @clk: clock for this channel 186 * @lock: a lock to use when altering an instance of this struct 187 * @serving: virtual channel currently being served by this physicalchannel 188 * @host: a pointer to the host (internal use) 189 */ 190struct s3c24xx_dma_phy { 191 unsigned int id; 192 bool valid; 193 void __iomem *base; 194 int irq; 195 struct clk *clk; 196 spinlock_t lock; 197 struct s3c24xx_dma_chan *serving; 198 struct s3c24xx_dma_engine *host; 199}; 200 201/* 202 * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel 203 * @id: the id of the channel 204 * @name: name of the channel 205 * @vc: wrappped virtual channel 206 * @phy: the physical channel utilized by this channel, if there is one 207 * @runtime_addr: address for RX/TX according to the runtime config 208 * @at: active transaction on this channel 209 * @lock: a lock for this channel data 210 * @host: a pointer to the host (internal use) 211 * @state: whether the channel is idle, running etc 212 * @slave: whether this channel is a device (slave) or for memcpy 213 */ 214struct s3c24xx_dma_chan { 215 int id; 216 const char *name; 217 struct virt_dma_chan vc; 218 struct s3c24xx_dma_phy *phy; 219 struct dma_slave_config cfg; 220 struct s3c24xx_txd *at; 221 struct s3c24xx_dma_engine *host; 222 enum s3c24xx_dma_chan_state state; 223 bool slave; 224}; 225 226/* 227 * struct s3c24xx_dma_engine - the local state holder for the S3C24XX 228 * @pdev: the corresponding platform device 229 * @pdata: platform data passed in from the platform/machine 230 * @base: virtual memory base (remapped) 231 * @slave: slave engine for this instance 232 * @memcpy: memcpy engine for this instance 233 * @phy_chans: array of data for the physical channels 234 */ 235struct s3c24xx_dma_engine { 236 struct platform_device *pdev; 237 const struct s3c24xx_dma_platdata *pdata; 238 struct soc_data *sdata; 239 void __iomem *base; 240 struct dma_device slave; 241 struct dma_device memcpy; 242 struct s3c24xx_dma_phy *phy_chans; 243}; 244 245/* 246 * Physical channel handling 247 */ 248 249/* 250 * Check whether a certain channel is busy or not. 251 */ 252static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy) 253{ 254 unsigned int val = readl(phy->base + S3C24XX_DSTAT); 255 return val & S3C24XX_DSTAT_STAT_BUSY; 256} 257 258static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan, 259 struct s3c24xx_dma_phy *phy) 260{ 261 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 262 const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; 263 struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; 264 int phyvalid; 265 266 /* every phy is valid for memcopy channels */ 267 if (!s3cchan->slave) 268 return true; 269 270 /* On newer variants all phys can be used for all virtual channels */ 271 if (s3cdma->sdata->has_reqsel) 272 return true; 273 274 phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH)); 275 return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false; 276} 277 278/* 279 * Allocate a physical channel for a virtual channel 280 * 281 * Try to locate a physical channel to be used for this transfer. If all 282 * are taken return NULL and the requester will have to cope by using 283 * some fallback PIO mode or retrying later. 284 */ 285static 286struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan) 287{ 288 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 289 struct s3c24xx_dma_phy *phy = NULL; 290 unsigned long flags; 291 int i; 292 int ret; 293 294 for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) { 295 phy = &s3cdma->phy_chans[i]; 296 297 if (!phy->valid) 298 continue; 299 300 if (!s3c24xx_dma_phy_valid(s3cchan, phy)) 301 continue; 302 303 spin_lock_irqsave(&phy->lock, flags); 304 305 if (!phy->serving) { 306 phy->serving = s3cchan; 307 spin_unlock_irqrestore(&phy->lock, flags); 308 break; 309 } 310 311 spin_unlock_irqrestore(&phy->lock, flags); 312 } 313 314 /* No physical channel available, cope with it */ 315 if (i == s3cdma->pdata->num_phy_channels) { 316 dev_warn(&s3cdma->pdev->dev, "no phy channel available\n"); 317 return NULL; 318 } 319 320 /* start the phy clock */ 321 if (s3cdma->sdata->has_clocks) { 322 ret = clk_enable(phy->clk); 323 if (ret) { 324 dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n", 325 phy->id, ret); 326 phy->serving = NULL; 327 return NULL; 328 } 329 } 330 331 return phy; 332} 333 334/* 335 * Mark the physical channel as free. 336 * 337 * This drops the link between the physical and virtual channel. 338 */ 339static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy) 340{ 341 struct s3c24xx_dma_engine *s3cdma = phy->host; 342 343 if (s3cdma->sdata->has_clocks) 344 clk_disable(phy->clk); 345 346 phy->serving = NULL; 347} 348 349/* 350 * Stops the channel by writing the stop bit. 351 * This should not be used for an on-going transfer, but as a method of 352 * shutting down a channel (eg, when it's no longer used) or terminating a 353 * transfer. 354 */ 355static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy) 356{ 357 writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG); 358} 359 360/* 361 * Virtual channel handling 362 */ 363 364static inline 365struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan) 366{ 367 return container_of(chan, struct s3c24xx_dma_chan, vc.chan); 368} 369 370static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan) 371{ 372 struct s3c24xx_dma_phy *phy = s3cchan->phy; 373 struct s3c24xx_txd *txd = s3cchan->at; 374 u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK; 375 376 return tc * txd->width; 377} 378 379static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan, 380 struct dma_slave_config *config) 381{ 382 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); 383 unsigned long flags; 384 int ret = 0; 385 386 /* Reject definitely invalid configurations */ 387 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 388 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 389 return -EINVAL; 390 391 spin_lock_irqsave(&s3cchan->vc.lock, flags); 392 393 if (!s3cchan->slave) { 394 ret = -EINVAL; 395 goto out; 396 } 397 398 s3cchan->cfg = *config; 399 400out: 401 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); 402 return ret; 403} 404 405/* 406 * Transfer handling 407 */ 408 409static inline 410struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx) 411{ 412 return container_of(tx, struct s3c24xx_txd, vd.tx); 413} 414 415static struct s3c24xx_txd *s3c24xx_dma_get_txd(void) 416{ 417 struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 418 419 if (txd) { 420 INIT_LIST_HEAD(&txd->dsg_list); 421 txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD; 422 } 423 424 return txd; 425} 426 427static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd) 428{ 429 struct s3c24xx_sg *dsg, *_dsg; 430 431 list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { 432 list_del(&dsg->node); 433 kfree(dsg); 434 } 435 436 kfree(txd); 437} 438 439static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan, 440 struct s3c24xx_txd *txd) 441{ 442 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 443 struct s3c24xx_dma_phy *phy = s3cchan->phy; 444 const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; 445 struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node); 446 u32 dcon = txd->dcon; 447 u32 val; 448 449 /* transfer-size and -count from len and width */ 450 switch (txd->width) { 451 case 1: 452 dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len; 453 break; 454 case 2: 455 dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2); 456 break; 457 case 4: 458 dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4); 459 break; 460 } 461 462 if (s3cchan->slave) { 463 struct s3c24xx_dma_channel *cdata = 464 &pdata->channels[s3cchan->id]; 465 466 if (s3cdma->sdata->has_reqsel) { 467 writel_relaxed((cdata->chansel << 1) | 468 S3C24XX_DMAREQSEL_HW, 469 phy->base + S3C24XX_DMAREQSEL); 470 } else { 471 int csel = cdata->chansel >> (phy->id * 472 S3C24XX_CHANSEL_WIDTH); 473 474 csel &= S3C24XX_CHANSEL_REQ_MASK; 475 dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT; 476 dcon |= S3C24XX_DCON_HWTRIG; 477 } 478 } else { 479 if (s3cdma->sdata->has_reqsel) 480 writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL); 481 } 482 483 writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC); 484 writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC); 485 writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST); 486 writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC); 487 writel_relaxed(dcon, phy->base + S3C24XX_DCON); 488 489 val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG); 490 val &= ~S3C24XX_DMASKTRIG_STOP; 491 val |= S3C24XX_DMASKTRIG_ON; 492 493 /* trigger the dma operation for memcpy transfers */ 494 if (!s3cchan->slave) 495 val |= S3C24XX_DMASKTRIG_SWTRIG; 496 497 writel(val, phy->base + S3C24XX_DMASKTRIG); 498} 499 500/* 501 * Set the initial DMA register values and start first sg. 502 */ 503static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan) 504{ 505 struct s3c24xx_dma_phy *phy = s3cchan->phy; 506 struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc); 507 struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); 508 509 list_del(&txd->vd.node); 510 511 s3cchan->at = txd; 512 513 /* Wait for channel inactive */ 514 while (s3c24xx_dma_phy_busy(phy)) 515 cpu_relax(); 516 517 /* point to the first element of the sg list */ 518 txd->at = txd->dsg_list.next; 519 s3c24xx_dma_start_next_sg(s3cchan, txd); 520} 521 522/* 523 * Try to allocate a physical channel. When successful, assign it to 524 * this virtual channel, and initiate the next descriptor. The 525 * virtual channel lock must be held at this point. 526 */ 527static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan) 528{ 529 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 530 struct s3c24xx_dma_phy *phy; 531 532 phy = s3c24xx_dma_get_phy(s3cchan); 533 if (!phy) { 534 dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n", 535 s3cchan->name); 536 s3cchan->state = S3C24XX_DMA_CHAN_WAITING; 537 return; 538 } 539 540 dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n", 541 phy->id, s3cchan->name); 542 543 s3cchan->phy = phy; 544 s3cchan->state = S3C24XX_DMA_CHAN_RUNNING; 545 546 s3c24xx_dma_start_next_txd(s3cchan); 547} 548 549static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy, 550 struct s3c24xx_dma_chan *s3cchan) 551{ 552 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 553 554 dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n", 555 phy->id, s3cchan->name); 556 557 /* 558 * We do this without taking the lock; we're really only concerned 559 * about whether this pointer is NULL or not, and we're guaranteed 560 * that this will only be called when it _already_ is non-NULL. 561 */ 562 phy->serving = s3cchan; 563 s3cchan->phy = phy; 564 s3cchan->state = S3C24XX_DMA_CHAN_RUNNING; 565 s3c24xx_dma_start_next_txd(s3cchan); 566} 567 568/* 569 * Free a physical DMA channel, potentially reallocating it to another 570 * virtual channel if we have any pending. 571 */ 572static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan) 573{ 574 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 575 struct s3c24xx_dma_chan *p, *next; 576 577retry: 578 next = NULL; 579 580 /* Find a waiting virtual channel for the next transfer. */ 581 list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node) 582 if (p->state == S3C24XX_DMA_CHAN_WAITING) { 583 next = p; 584 break; 585 } 586 587 if (!next) { 588 list_for_each_entry(p, &s3cdma->slave.channels, 589 vc.chan.device_node) 590 if (p->state == S3C24XX_DMA_CHAN_WAITING && 591 s3c24xx_dma_phy_valid(p, s3cchan->phy)) { 592 next = p; 593 break; 594 } 595 } 596 597 /* Ensure that the physical channel is stopped */ 598 s3c24xx_dma_terminate_phy(s3cchan->phy); 599 600 if (next) { 601 bool success; 602 603 /* 604 * Eww. We know this isn't going to deadlock 605 * but lockdep probably doesn't. 606 */ 607 spin_lock(&next->vc.lock); 608 /* Re-check the state now that we have the lock */ 609 success = next->state == S3C24XX_DMA_CHAN_WAITING; 610 if (success) 611 s3c24xx_dma_phy_reassign_start(s3cchan->phy, next); 612 spin_unlock(&next->vc.lock); 613 614 /* If the state changed, try to find another channel */ 615 if (!success) 616 goto retry; 617 } else { 618 /* No more jobs, so free up the physical channel */ 619 s3c24xx_dma_put_phy(s3cchan->phy); 620 } 621 622 s3cchan->phy = NULL; 623 s3cchan->state = S3C24XX_DMA_CHAN_IDLE; 624} 625 626static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd) 627{ 628 struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); 629 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan); 630 631 if (!s3cchan->slave) 632 dma_descriptor_unmap(&vd->tx); 633 634 s3c24xx_dma_free_txd(txd); 635} 636 637static irqreturn_t s3c24xx_dma_irq(int irq, void *data) 638{ 639 struct s3c24xx_dma_phy *phy = data; 640 struct s3c24xx_dma_chan *s3cchan = phy->serving; 641 struct s3c24xx_txd *txd; 642 643 dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id); 644 645 /* 646 * Interrupts happen to notify the completion of a transfer and the 647 * channel should have moved into its stop state already on its own. 648 * Therefore interrupts on channels not bound to a virtual channel 649 * should never happen. Nevertheless send a terminate command to the 650 * channel if the unlikely case happens. 651 */ 652 if (unlikely(!s3cchan)) { 653 dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n", 654 phy->id); 655 656 s3c24xx_dma_terminate_phy(phy); 657 658 return IRQ_HANDLED; 659 } 660 661 spin_lock(&s3cchan->vc.lock); 662 txd = s3cchan->at; 663 if (txd) { 664 /* when more sg's are in this txd, start the next one */ 665 if (!list_is_last(txd->at, &txd->dsg_list)) { 666 txd->at = txd->at->next; 667 if (txd->cyclic) 668 vchan_cyclic_callback(&txd->vd); 669 s3c24xx_dma_start_next_sg(s3cchan, txd); 670 } else if (!txd->cyclic) { 671 s3cchan->at = NULL; 672 vchan_cookie_complete(&txd->vd); 673 674 /* 675 * And start the next descriptor (if any), 676 * otherwise free this channel. 677 */ 678 if (vchan_next_desc(&s3cchan->vc)) 679 s3c24xx_dma_start_next_txd(s3cchan); 680 else 681 s3c24xx_dma_phy_free(s3cchan); 682 } else { 683 vchan_cyclic_callback(&txd->vd); 684 685 /* Cyclic: reset at beginning */ 686 txd->at = txd->dsg_list.next; 687 s3c24xx_dma_start_next_sg(s3cchan, txd); 688 } 689 } 690 spin_unlock(&s3cchan->vc.lock); 691 692 return IRQ_HANDLED; 693} 694 695/* 696 * The DMA ENGINE API 697 */ 698 699static int s3c24xx_dma_terminate_all(struct dma_chan *chan) 700{ 701 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); 702 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 703 LIST_HEAD(head); 704 unsigned long flags; 705 int ret; 706 707 spin_lock_irqsave(&s3cchan->vc.lock, flags); 708 709 if (!s3cchan->phy && !s3cchan->at) { 710 dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n", 711 s3cchan->id); 712 ret = -EINVAL; 713 goto unlock; 714 } 715 716 s3cchan->state = S3C24XX_DMA_CHAN_IDLE; 717 718 /* Mark physical channel as free */ 719 if (s3cchan->phy) 720 s3c24xx_dma_phy_free(s3cchan); 721 722 /* Dequeue current job */ 723 if (s3cchan->at) { 724 vchan_terminate_vdesc(&s3cchan->at->vd); 725 s3cchan->at = NULL; 726 } 727 728 /* Dequeue jobs not yet fired as well */ 729 730 vchan_get_all_descriptors(&s3cchan->vc, &head); 731 732 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); 733 734 vchan_dma_desc_free_list(&s3cchan->vc, &head); 735 736 return 0; 737 738unlock: 739 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); 740 741 return ret; 742} 743 744static void s3c24xx_dma_synchronize(struct dma_chan *chan) 745{ 746 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); 747 748 vchan_synchronize(&s3cchan->vc); 749} 750 751static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan) 752{ 753 /* Ensure all queued descriptors are freed */ 754 vchan_free_chan_resources(to_virt_chan(chan)); 755} 756 757static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan, 758 dma_cookie_t cookie, struct dma_tx_state *txstate) 759{ 760 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); 761 struct s3c24xx_txd *txd; 762 struct s3c24xx_sg *dsg; 763 struct virt_dma_desc *vd; 764 unsigned long flags; 765 enum dma_status ret; 766 size_t bytes = 0; 767 768 spin_lock_irqsave(&s3cchan->vc.lock, flags); 769 ret = dma_cookie_status(chan, cookie, txstate); 770 771 /* 772 * There's no point calculating the residue if there's 773 * no txstate to store the value. 774 */ 775 if (ret == DMA_COMPLETE || !txstate) { 776 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); 777 return ret; 778 } 779 780 vd = vchan_find_desc(&s3cchan->vc, cookie); 781 if (vd) { 782 /* On the issued list, so hasn't been processed yet */ 783 txd = to_s3c24xx_txd(&vd->tx); 784 785 list_for_each_entry(dsg, &txd->dsg_list, node) 786 bytes += dsg->len; 787 } else { 788 /* 789 * Currently running, so sum over the pending sg's and 790 * the currently active one. 791 */ 792 txd = s3cchan->at; 793 794 dsg = list_entry(txd->at, struct s3c24xx_sg, node); 795 list_for_each_entry_from(dsg, &txd->dsg_list, node) 796 bytes += dsg->len; 797 798 bytes += s3c24xx_dma_getbytes_chan(s3cchan); 799 } 800 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); 801 802 /* 803 * This cookie not complete yet 804 * Get number of bytes left in the active transactions and queue 805 */ 806 dma_set_residue(txstate, bytes); 807 808 /* Whether waiting or running, we're in progress */ 809 return ret; 810} 811 812/* 813 * Initialize a descriptor to be used by memcpy submit 814 */ 815static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy( 816 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 817 size_t len, unsigned long flags) 818{ 819 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); 820 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 821 struct s3c24xx_txd *txd; 822 struct s3c24xx_sg *dsg; 823 int src_mod, dest_mod; 824 825 dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n", 826 len, s3cchan->name); 827 828 if ((len & S3C24XX_DCON_TC_MASK) != len) { 829 dev_err(&s3cdma->pdev->dev, "memcpy size %zu to large\n", len); 830 return NULL; 831 } 832 833 txd = s3c24xx_dma_get_txd(); 834 if (!txd) 835 return NULL; 836 837 dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); 838 if (!dsg) { 839 s3c24xx_dma_free_txd(txd); 840 return NULL; 841 } 842 list_add_tail(&dsg->node, &txd->dsg_list); 843 844 dsg->src_addr = src; 845 dsg->dst_addr = dest; 846 dsg->len = len; 847 848 /* 849 * Determine a suitable transfer width. 850 * The DMA controller cannot fetch/store information which is not 851 * naturally aligned on the bus, i.e., a 4 byte fetch must start at 852 * an address divisible by 4 - more generally addr % width must be 0. 853 */ 854 src_mod = src % 4; 855 dest_mod = dest % 4; 856 switch (len % 4) { 857 case 0: 858 txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1; 859 break; 860 case 2: 861 txd->width = ((src_mod == 2 || src_mod == 0) && 862 (dest_mod == 2 || dest_mod == 0)) ? 2 : 1; 863 break; 864 default: 865 txd->width = 1; 866 break; 867 } 868 869 txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT; 870 txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT; 871 txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK | 872 S3C24XX_DCON_SERV_WHOLE; 873 874 return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); 875} 876 877static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic( 878 struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, 879 enum dma_transfer_direction direction, unsigned long flags) 880{ 881 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); 882 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 883 const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; 884 struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; 885 struct s3c24xx_txd *txd; 886 struct s3c24xx_sg *dsg; 887 unsigned sg_len; 888 dma_addr_t slave_addr; 889 u32 hwcfg = 0; 890 int i; 891 892 dev_dbg(&s3cdma->pdev->dev, 893 "prepare cyclic transaction of %zu bytes with period %zu from %s\n", 894 size, period, s3cchan->name); 895 896 if (!is_slave_direction(direction)) { 897 dev_err(&s3cdma->pdev->dev, 898 "direction %d unsupported\n", direction); 899 return NULL; 900 } 901 902 txd = s3c24xx_dma_get_txd(); 903 if (!txd) 904 return NULL; 905 906 txd->cyclic = 1; 907 908 if (cdata->handshake) 909 txd->dcon |= S3C24XX_DCON_HANDSHAKE; 910 911 switch (cdata->bus) { 912 case S3C24XX_DMA_APB: 913 txd->dcon |= S3C24XX_DCON_SYNC_PCLK; 914 hwcfg |= S3C24XX_DISRCC_LOC_APB; 915 break; 916 case S3C24XX_DMA_AHB: 917 txd->dcon |= S3C24XX_DCON_SYNC_HCLK; 918 hwcfg |= S3C24XX_DISRCC_LOC_AHB; 919 break; 920 } 921 922 /* 923 * Always assume our peripheral desintation is a fixed 924 * address in memory. 925 */ 926 hwcfg |= S3C24XX_DISRCC_INC_FIXED; 927 928 /* 929 * Individual dma operations are requested by the slave, 930 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE). 931 */ 932 txd->dcon |= S3C24XX_DCON_SERV_SINGLE; 933 934 if (direction == DMA_MEM_TO_DEV) { 935 txd->disrcc = S3C24XX_DISRCC_LOC_AHB | 936 S3C24XX_DISRCC_INC_INCREMENT; 937 txd->didstc = hwcfg; 938 slave_addr = s3cchan->cfg.dst_addr; 939 txd->width = s3cchan->cfg.dst_addr_width; 940 } else { 941 txd->disrcc = hwcfg; 942 txd->didstc = S3C24XX_DIDSTC_LOC_AHB | 943 S3C24XX_DIDSTC_INC_INCREMENT; 944 slave_addr = s3cchan->cfg.src_addr; 945 txd->width = s3cchan->cfg.src_addr_width; 946 } 947 948 sg_len = size / period; 949 950 for (i = 0; i < sg_len; i++) { 951 dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); 952 if (!dsg) { 953 s3c24xx_dma_free_txd(txd); 954 return NULL; 955 } 956 list_add_tail(&dsg->node, &txd->dsg_list); 957 958 dsg->len = period; 959 /* Check last period length */ 960 if (i == sg_len - 1) 961 dsg->len = size - period * i; 962 if (direction == DMA_MEM_TO_DEV) { 963 dsg->src_addr = addr + period * i; 964 dsg->dst_addr = slave_addr; 965 } else { /* DMA_DEV_TO_MEM */ 966 dsg->src_addr = slave_addr; 967 dsg->dst_addr = addr + period * i; 968 } 969 } 970 971 return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); 972} 973 974static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg( 975 struct dma_chan *chan, struct scatterlist *sgl, 976 unsigned int sg_len, enum dma_transfer_direction direction, 977 unsigned long flags, void *context) 978{ 979 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); 980 struct s3c24xx_dma_engine *s3cdma = s3cchan->host; 981 const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; 982 struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; 983 struct s3c24xx_txd *txd; 984 struct s3c24xx_sg *dsg; 985 struct scatterlist *sg; 986 dma_addr_t slave_addr; 987 u32 hwcfg = 0; 988 int tmp; 989 990 dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n", 991 sg_dma_len(sgl), s3cchan->name); 992 993 txd = s3c24xx_dma_get_txd(); 994 if (!txd) 995 return NULL; 996 997 if (cdata->handshake) 998 txd->dcon |= S3C24XX_DCON_HANDSHAKE; 999 1000 switch (cdata->bus) { 1001 case S3C24XX_DMA_APB: 1002 txd->dcon |= S3C24XX_DCON_SYNC_PCLK; 1003 hwcfg |= S3C24XX_DISRCC_LOC_APB; 1004 break; 1005 case S3C24XX_DMA_AHB: 1006 txd->dcon |= S3C24XX_DCON_SYNC_HCLK; 1007 hwcfg |= S3C24XX_DISRCC_LOC_AHB; 1008 break; 1009 } 1010 1011 /* 1012 * Always assume our peripheral desintation is a fixed 1013 * address in memory. 1014 */ 1015 hwcfg |= S3C24XX_DISRCC_INC_FIXED; 1016 1017 /* 1018 * Individual dma operations are requested by the slave, 1019 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE). 1020 */ 1021 txd->dcon |= S3C24XX_DCON_SERV_SINGLE; 1022 1023 if (direction == DMA_MEM_TO_DEV) { 1024 txd->disrcc = S3C24XX_DISRCC_LOC_AHB | 1025 S3C24XX_DISRCC_INC_INCREMENT; 1026 txd->didstc = hwcfg; 1027 slave_addr = s3cchan->cfg.dst_addr; 1028 txd->width = s3cchan->cfg.dst_addr_width; 1029 } else if (direction == DMA_DEV_TO_MEM) { 1030 txd->disrcc = hwcfg; 1031 txd->didstc = S3C24XX_DIDSTC_LOC_AHB | 1032 S3C24XX_DIDSTC_INC_INCREMENT; 1033 slave_addr = s3cchan->cfg.src_addr; 1034 txd->width = s3cchan->cfg.src_addr_width; 1035 } else { 1036 s3c24xx_dma_free_txd(txd); 1037 dev_err(&s3cdma->pdev->dev, 1038 "direction %d unsupported\n", direction); 1039 return NULL; 1040 } 1041 1042 for_each_sg(sgl, sg, sg_len, tmp) { 1043 dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); 1044 if (!dsg) { 1045 s3c24xx_dma_free_txd(txd); 1046 return NULL; 1047 } 1048 list_add_tail(&dsg->node, &txd->dsg_list); 1049 1050 dsg->len = sg_dma_len(sg); 1051 if (direction == DMA_MEM_TO_DEV) { 1052 dsg->src_addr = sg_dma_address(sg); 1053 dsg->dst_addr = slave_addr; 1054 } else { /* DMA_DEV_TO_MEM */ 1055 dsg->src_addr = slave_addr; 1056 dsg->dst_addr = sg_dma_address(sg); 1057 } 1058 } 1059 1060 return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); 1061} 1062 1063/* 1064 * Slave transactions callback to the slave device to allow 1065 * synchronization of slave DMA signals with the DMAC enable 1066 */ 1067static void s3c24xx_dma_issue_pending(struct dma_chan *chan) 1068{ 1069 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); 1070 unsigned long flags; 1071 1072 spin_lock_irqsave(&s3cchan->vc.lock, flags); 1073 if (vchan_issue_pending(&s3cchan->vc)) { 1074 if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING) 1075 s3c24xx_dma_phy_alloc_and_start(s3cchan); 1076 } 1077 spin_unlock_irqrestore(&s3cchan->vc.lock, flags); 1078} 1079 1080/* 1081 * Bringup and teardown 1082 */ 1083 1084/* 1085 * Initialise the DMAC memcpy/slave channels. 1086 * Make a local wrapper to hold required data 1087 */ 1088static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma, 1089 struct dma_device *dmadev, unsigned int channels, bool slave) 1090{ 1091 struct s3c24xx_dma_chan *chan; 1092 int i; 1093 1094 INIT_LIST_HEAD(&dmadev->channels); 1095 1096 /* 1097 * Register as many many memcpy as we have physical channels, 1098 * we won't always be able to use all but the code will have 1099 * to cope with that situation. 1100 */ 1101 for (i = 0; i < channels; i++) { 1102 chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL); 1103 if (!chan) 1104 return -ENOMEM; 1105 1106 chan->id = i; 1107 chan->host = s3cdma; 1108 chan->state = S3C24XX_DMA_CHAN_IDLE; 1109 1110 if (slave) { 1111 chan->slave = true; 1112 chan->name = kasprintf(GFP_KERNEL, "slave%d", i); 1113 if (!chan->name) 1114 return -ENOMEM; 1115 } else { 1116 chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1117 if (!chan->name) 1118 return -ENOMEM; 1119 } 1120 dev_dbg(dmadev->dev, 1121 "initialize virtual channel \"%s\"\n", 1122 chan->name); 1123 1124 chan->vc.desc_free = s3c24xx_dma_desc_free; 1125 vchan_init(&chan->vc, dmadev); 1126 } 1127 dev_info(dmadev->dev, "initialized %d virtual %s channels\n", 1128 i, slave ? "slave" : "memcpy"); 1129 return i; 1130} 1131 1132static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev) 1133{ 1134 struct s3c24xx_dma_chan *chan = NULL; 1135 struct s3c24xx_dma_chan *next; 1136 1137 list_for_each_entry_safe(chan, 1138 next, &dmadev->channels, vc.chan.device_node) { 1139 list_del(&chan->vc.chan.device_node); 1140 tasklet_kill(&chan->vc.task); 1141 } 1142} 1143 1144/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */ 1145static struct soc_data soc_s3c2410 = { 1146 .stride = 0x40, 1147 .has_reqsel = false, 1148 .has_clocks = false, 1149}; 1150 1151/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */ 1152static struct soc_data soc_s3c2412 = { 1153 .stride = 0x40, 1154 .has_reqsel = true, 1155 .has_clocks = true, 1156}; 1157 1158/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */ 1159static struct soc_data soc_s3c2443 = { 1160 .stride = 0x100, 1161 .has_reqsel = true, 1162 .has_clocks = true, 1163}; 1164 1165static const struct platform_device_id s3c24xx_dma_driver_ids[] = { 1166 { 1167 .name = "s3c2410-dma", 1168 .driver_data = (kernel_ulong_t)&soc_s3c2410, 1169 }, { 1170 .name = "s3c2412-dma", 1171 .driver_data = (kernel_ulong_t)&soc_s3c2412, 1172 }, { 1173 .name = "s3c2443-dma", 1174 .driver_data = (kernel_ulong_t)&soc_s3c2443, 1175 }, 1176 { }, 1177}; 1178 1179static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev) 1180{ 1181 return (struct soc_data *) 1182 platform_get_device_id(pdev)->driver_data; 1183} 1184 1185static int s3c24xx_dma_probe(struct platform_device *pdev) 1186{ 1187 const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); 1188 struct s3c24xx_dma_engine *s3cdma; 1189 struct soc_data *sdata; 1190 struct resource *res; 1191 int ret; 1192 int i; 1193 1194 if (!pdata) { 1195 dev_err(&pdev->dev, "platform data missing\n"); 1196 return -ENODEV; 1197 } 1198 1199 /* Basic sanity check */ 1200 if (pdata->num_phy_channels > MAX_DMA_CHANNELS) { 1201 dev_err(&pdev->dev, "too many dma channels %d, max %d\n", 1202 pdata->num_phy_channels, MAX_DMA_CHANNELS); 1203 return -EINVAL; 1204 } 1205 1206 sdata = s3c24xx_dma_get_soc_data(pdev); 1207 if (!sdata) 1208 return -EINVAL; 1209 1210 s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL); 1211 if (!s3cdma) 1212 return -ENOMEM; 1213 1214 s3cdma->pdev = pdev; 1215 s3cdma->pdata = pdata; 1216 s3cdma->sdata = sdata; 1217 1218 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1219 s3cdma->base = devm_ioremap_resource(&pdev->dev, res); 1220 if (IS_ERR(s3cdma->base)) 1221 return PTR_ERR(s3cdma->base); 1222 1223 s3cdma->phy_chans = devm_kcalloc(&pdev->dev, 1224 pdata->num_phy_channels, 1225 sizeof(struct s3c24xx_dma_phy), 1226 GFP_KERNEL); 1227 if (!s3cdma->phy_chans) 1228 return -ENOMEM; 1229 1230 /* acquire irqs and clocks for all physical channels */ 1231 for (i = 0; i < pdata->num_phy_channels; i++) { 1232 struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; 1233 char clk_name[6]; 1234 1235 phy->id = i; 1236 phy->base = s3cdma->base + (i * sdata->stride); 1237 phy->host = s3cdma; 1238 1239 phy->irq = platform_get_irq(pdev, i); 1240 if (phy->irq < 0) 1241 continue; 1242 1243 ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq, 1244 0, pdev->name, phy); 1245 if (ret) { 1246 dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n", 1247 i, ret); 1248 continue; 1249 } 1250 1251 if (sdata->has_clocks) { 1252 sprintf(clk_name, "dma.%d", i); 1253 phy->clk = devm_clk_get(&pdev->dev, clk_name); 1254 if (IS_ERR(phy->clk) && sdata->has_clocks) { 1255 dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n", 1256 i, PTR_ERR(phy->clk)); 1257 continue; 1258 } 1259 1260 ret = clk_prepare(phy->clk); 1261 if (ret) { 1262 dev_err(&pdev->dev, "clock for phy %d failed, error %d\n", 1263 i, ret); 1264 continue; 1265 } 1266 } 1267 1268 spin_lock_init(&phy->lock); 1269 phy->valid = true; 1270 1271 dev_dbg(&pdev->dev, "physical channel %d is %s\n", 1272 i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE"); 1273 } 1274 1275 /* Initialize memcpy engine */ 1276 dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask); 1277 dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask); 1278 s3cdma->memcpy.dev = &pdev->dev; 1279 s3cdma->memcpy.device_free_chan_resources = 1280 s3c24xx_dma_free_chan_resources; 1281 s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; 1282 s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; 1283 s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; 1284 s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config; 1285 s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all; 1286 s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize; 1287 1288 /* Initialize slave engine for SoC internal dedicated peripherals */ 1289 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); 1290 dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask); 1291 dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask); 1292 s3cdma->slave.dev = &pdev->dev; 1293 s3cdma->slave.device_free_chan_resources = 1294 s3c24xx_dma_free_chan_resources; 1295 s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status; 1296 s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; 1297 s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; 1298 s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; 1299 s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config; 1300 s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all; 1301 s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize; 1302 s3cdma->slave.filter.map = pdata->slave_map; 1303 s3cdma->slave.filter.mapcnt = pdata->slavecnt; 1304 s3cdma->slave.filter.fn = s3c24xx_dma_filter; 1305 1306 /* Register as many memcpy channels as there are physical channels */ 1307 ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, 1308 pdata->num_phy_channels, false); 1309 if (ret <= 0) { 1310 dev_warn(&pdev->dev, 1311 "%s failed to enumerate memcpy channels - %d\n", 1312 __func__, ret); 1313 goto err_memcpy; 1314 } 1315 1316 /* Register slave channels */ 1317 ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave, 1318 pdata->num_channels, true); 1319 if (ret <= 0) { 1320 dev_warn(&pdev->dev, 1321 "%s failed to enumerate slave channels - %d\n", 1322 __func__, ret); 1323 goto err_slave; 1324 } 1325 1326 ret = dma_async_device_register(&s3cdma->memcpy); 1327 if (ret) { 1328 dev_warn(&pdev->dev, 1329 "%s failed to register memcpy as an async device - %d\n", 1330 __func__, ret); 1331 goto err_memcpy_reg; 1332 } 1333 1334 ret = dma_async_device_register(&s3cdma->slave); 1335 if (ret) { 1336 dev_warn(&pdev->dev, 1337 "%s failed to register slave as an async device - %d\n", 1338 __func__, ret); 1339 goto err_slave_reg; 1340 } 1341 1342 platform_set_drvdata(pdev, s3cdma); 1343 dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n", 1344 pdata->num_phy_channels); 1345 1346 return 0; 1347 1348err_slave_reg: 1349 dma_async_device_unregister(&s3cdma->memcpy); 1350err_memcpy_reg: 1351 s3c24xx_dma_free_virtual_channels(&s3cdma->slave); 1352err_slave: 1353 s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); 1354err_memcpy: 1355 if (sdata->has_clocks) 1356 for (i = 0; i < pdata->num_phy_channels; i++) { 1357 struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; 1358 if (phy->valid) 1359 clk_unprepare(phy->clk); 1360 } 1361 1362 return ret; 1363} 1364 1365static void s3c24xx_dma_free_irq(struct platform_device *pdev, 1366 struct s3c24xx_dma_engine *s3cdma) 1367{ 1368 int i; 1369 1370 for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) { 1371 struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; 1372 1373 devm_free_irq(&pdev->dev, phy->irq, phy); 1374 } 1375} 1376 1377static int s3c24xx_dma_remove(struct platform_device *pdev) 1378{ 1379 const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); 1380 struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev); 1381 struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev); 1382 int i; 1383 1384 dma_async_device_unregister(&s3cdma->slave); 1385 dma_async_device_unregister(&s3cdma->memcpy); 1386 1387 s3c24xx_dma_free_irq(pdev, s3cdma); 1388 1389 s3c24xx_dma_free_virtual_channels(&s3cdma->slave); 1390 s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); 1391 1392 if (sdata->has_clocks) 1393 for (i = 0; i < pdata->num_phy_channels; i++) { 1394 struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; 1395 if (phy->valid) 1396 clk_unprepare(phy->clk); 1397 } 1398 1399 return 0; 1400} 1401 1402static struct platform_driver s3c24xx_dma_driver = { 1403 .driver = { 1404 .name = "s3c24xx-dma", 1405 }, 1406 .id_table = s3c24xx_dma_driver_ids, 1407 .probe = s3c24xx_dma_probe, 1408 .remove = s3c24xx_dma_remove, 1409}; 1410 1411module_platform_driver(s3c24xx_dma_driver); 1412 1413bool s3c24xx_dma_filter(struct dma_chan *chan, void *param) 1414{ 1415 struct s3c24xx_dma_chan *s3cchan; 1416 1417 if (chan->device->dev->driver != &s3c24xx_dma_driver.driver) 1418 return false; 1419 1420 s3cchan = to_s3c24xx_dma_chan(chan); 1421 1422 return s3cchan->id == (uintptr_t)param; 1423} 1424EXPORT_SYMBOL(s3c24xx_dma_filter); 1425 1426MODULE_DESCRIPTION("S3C24XX DMA Driver"); 1427MODULE_AUTHOR("Heiko Stuebner"); 1428MODULE_LICENSE("GPL v2");