rcar_du_crtc.c (35138B)
// SPDX-License-Identifier: GPL-2.0+
/*
 * rcar_du_crtc.c -- R-Car Display Unit CRTCs
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>

#include "rcar_cmm.h"
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
#include "rcar_du_plane.h"
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
#include "rcar_lvds.h"

static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
}

static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
}

static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
}

static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
}

void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcrtc->dsysr = (rcrtc->dsysr & ~clr) | set;
	rcar_du_write(rcdu, rcrtc->mmio_offset + DSYSR, rcrtc->dsysr);
}

/* -----------------------------------------------------------------------------
 * Hardware Setup
 */

struct dpll_info {
	unsigned int output;
	unsigned int fdpll;
	unsigned int n;
	unsigned int m;
};

static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
				 struct dpll_info *dpll,
				 unsigned long input,
				 unsigned long target)
{
	unsigned long best_diff = (unsigned long)-1;
	unsigned long diff;
	unsigned int fdpll;
	unsigned int m;
	unsigned int n;

	/*
	 *   fin                                 fvco        fout       fclkout
	 * in --> [1/M] --> |PD| -> [LPF] -> [VCO] -> [1/P] -+-> [1/FDPLL] -> out
	 *              +-> |  |                             |
	 *              |                                    |
	 *              +---------------- [1/N] <------------+
	 *
	 * fclkout = fvco / P / FDPLL -- (1)
	 *
	 * fin/M = fvco/P/N
	 *
	 * fvco = fin * P * N / M -- (2)
	 *
	 * (1) + (2) indicates
	 *
	 *	fclkout = fin * N / M / FDPLL
	 *
	 * NOTES
	 *	N	: (n + 1)
	 *	M	: (m + 1)
	 *	FDPLL	: (fdpll + 1)
	 *	P	: 2
	 *	2kHz < fvco < 4096MHz
	 *
	 * To minimize the jitter,
	 *	N : as large as possible
	 *	M : as small as possible
	 */
	for (m = 0; m < 4; m++) {
		for (n = 119; n > 38; n--) {
			/*
			 * This code only runs on 64-bit architectures, the
			 * unsigned long type can thus be used for 64-bit
			 * computation. It will still compile without any
			 * warning on 32-bit architectures.
			 *
			 * To optimize calculations, use fout instead of fvco
			 * to verify the VCO frequency constraint.
			 */
			unsigned long fout = input * (n + 1) / (m + 1);

			if (fout < 1000 || fout > 2048 * 1000 * 1000U)
				continue;

			for (fdpll = 1; fdpll < 32; fdpll++) {
				unsigned long output;

				output = fout / (fdpll + 1);
				if (output >= 400 * 1000 * 1000)
					continue;

				diff = abs((long)output - (long)target);
				if (best_diff > diff) {
					best_diff = diff;
					dpll->n = n;
					dpll->m = m;
					dpll->fdpll = fdpll;
					dpll->output = output;
				}

				if (diff == 0)
					goto done;
			}
		}
	}

done:
	dev_dbg(rcrtc->dev->dev,
		"output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
		dpll->output, dpll->fdpll, dpll->n, dpll->m, best_diff);
}
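
/*
 * Worked example for the formula above (the input frequency is hypothetical,
 * not taken from any specific board): with a 33 MHz DOTCLKIN input, n = 59,
 * m = 0 and fdpll = 19 give
 *
 *	fclkout = 33 MHz * 60 / 1 / 20 = 99 MHz
 *
 * with fout = 33 MHz * 60 / 1 = 1980 MHz, which satisfies both the fout range
 * check (1 kHz to 2048 MHz) and the 400 MHz limit on the post-divider output
 * enforced by the loops above.
 */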

struct du_clk_params {
	struct clk *clk;
	unsigned long rate;
	unsigned long diff;
	u32 escr;
};

static void rcar_du_escr_divider(struct clk *clk, unsigned long target,
				 u32 escr, struct du_clk_params *params)
{
	unsigned long rate;
	unsigned long diff;
	u32 div;

	/*
	 * If the target rate has already been achieved perfectly we can't do
	 * better.
	 */
	if (params->diff == 0)
		return;

	/*
	 * Compute the input clock rate and internal divisor values to obtain
	 * the clock rate closest to the target frequency.
	 */
	rate = clk_round_rate(clk, target);
	div = clamp(DIV_ROUND_CLOSEST(rate, target), 1UL, 64UL) - 1;
	diff = abs(rate / (div + 1) - target);

	/*
	 * Store the parameters if the resulting frequency is better than any
	 * previously calculated value.
	 */
	if (diff < params->diff) {
		params->clk = clk;
		params->rate = rate;
		params->diff = diff;
		params->escr = escr | div;
	}
}
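
/*
 * For example (the rounded rate is hypothetical and depends on the clock
 * tree): with a 148500000 Hz target and clk_round_rate() returning
 * 297000000 Hz, div = clamp(DIV_ROUND_CLOSEST(297000000, 148500000), 1, 64) - 1
 * = 1 and diff = |297000000 / 2 - 148500000| = 0, a perfect match. The caller
 * then programs ESCR with the clock-select bits ORed with this divider value.
 */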

static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
	{ .soc_id = "r8a7795", .revision = "ES1.*" },
	{ /* sentinel */ }
};

static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
	const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
	struct rcar_du_device *rcdu = rcrtc->dev;
	unsigned long mode_clock = mode->clock * 1000;
	unsigned int hdse_offset;
	u32 dsmr;
	u32 escr;

	if (rcdu->info->dpll_mask & (1 << rcrtc->index)) {
		unsigned long target = mode_clock;
		struct dpll_info dpll = { 0 };
		unsigned long extclk;
		u32 dpllcr;
		u32 div = 0;

		/*
		 * DU channels that have a display PLL can't use the internal
		 * system clock, and have no internal clock divider.
		 */

		/*
		 * The H3 ES1.x exhibits dot clock duty cycle stability issues.
		 * We can work around them by configuring the DPLL to twice the
		 * desired frequency, coupled with a /2 post-divider. Restrict
		 * the workaround to H3 ES1.x as ES2.0 and all other SoCs have
		 * no post-divider when a display PLL is present (as shown by
		 * the workaround breaking HDMI output on M3-W during testing).
		 */
		if (soc_device_match(rcar_du_r8a7795_es1)) {
			target *= 2;
			div = 1;
		}

		extclk = clk_get_rate(rcrtc->extclock);
		rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);

		dpllcr = DPLLCR_CODE | DPLLCR_CLKE
		       | DPLLCR_FDPLL(dpll.fdpll)
		       | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
		       | DPLLCR_STBY;

		if (rcrtc->index == 1)
			dpllcr |= DPLLCR_PLCS1
			       |  DPLLCR_INCS_DOTCLKIN1;
		else
			dpllcr |= DPLLCR_PLCS0
			       |  DPLLCR_INCS_DOTCLKIN0;

		rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);

		escr = ESCR_DCLKSEL_DCLKIN | div;
	} else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) ||
		   rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) {
		/*
		 * Use the external LVDS or DSI PLL output as the dot clock when
		 * outputting to the LVDS or DSI encoder on an SoC that supports
		 * this clock routing option. We use the clock directly in that
		 * case, without any additional divider.
		 */
		escr = ESCR_DCLKSEL_DCLKIN;
	} else {
		struct du_clk_params params = { .diff = (unsigned long)-1 };

		rcar_du_escr_divider(rcrtc->clock, mode_clock,
				     ESCR_DCLKSEL_CLKS, &params);
		if (rcrtc->extclock)
			rcar_du_escr_divider(rcrtc->extclock, mode_clock,
					     ESCR_DCLKSEL_DCLKIN, &params);

		dev_dbg(rcrtc->dev->dev, "mode clock %lu %s rate %lu\n",
			mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext",
			params.rate);

		clk_set_rate(params.clk, params.rate);
		escr = params.escr;
	}

	dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);

	rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
	rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);

	/* Signal polarities */
	dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
	     | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
	     | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0)
	     | DSMR_DIPM_DISP | DSMR_CSPM;
	rcar_du_crtc_write(rcrtc, DSMR, dsmr);

	hdse_offset = 19;
	if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2))
		hdse_offset += 25;

	/* Display timings */
	rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start -
					hdse_offset);
	rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
					mode->hdisplay - hdse_offset);
	rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
					mode->hsync_start - 1);
	rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1);

	rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
					mode->crtc_vsync_end - 2);
	rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vdisplay - 2);
	rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vsync_start - 1);
	rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);

	rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
	rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
}
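
/*
 * As a concrete illustration (standard CEA 1920x1080@60 timings, progressive,
 * so the crtc_* copies equal the plain fields; CMM unused, so hdse_offset is
 * 19): with hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200,
 * vdisplay 1080, vsync_start 1084, vsync_end 1089 and vtotal 1125, the code
 * above programs HDSR = 2200 - 2008 - 19 = 173, HDER = 173 + 1920 = 2093,
 * HSWR = 2052 - 2008 - 1 = 43, HCR = 2199, VDSR = 1125 - 1089 - 2 = 34,
 * VDER = 34 + 1080 = 1114, VSPR = 1125 - 1089 + 1084 - 1 = 1119, VCR = 1124,
 * DESR = 191 and DEWR = 1920.
 */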

static unsigned int plane_zpos(struct rcar_du_plane *plane)
{
	return plane->plane.state->normalized_zpos;
}

static const struct rcar_du_format_info *
plane_format(struct rcar_du_plane *plane)
{
	return to_rcar_plane_state(plane->plane.state)->format;
}

static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
	struct rcar_du_device *rcdu = rcrtc->dev;
	unsigned int num_planes = 0;
	unsigned int dptsr_planes;
	unsigned int hwplanes = 0;
	unsigned int prio = 0;
	unsigned int i;
	u32 dspr = 0;

	for (i = 0; i < rcrtc->group->num_planes; ++i) {
		struct rcar_du_plane *plane = &rcrtc->group->planes[i];
		unsigned int j;

		if (plane->plane.state->crtc != &rcrtc->crtc ||
		    !plane->plane.state->visible)
			continue;

		/* Insert the plane in the sorted planes array. */
		for (j = num_planes++; j > 0; --j) {
			if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
				break;
			planes[j] = planes[j-1];
		}

		planes[j] = plane;
		prio += plane_format(plane)->planes * 4;
	}

	for (i = 0; i < num_planes; ++i) {
		struct rcar_du_plane *plane = planes[i];
		struct drm_plane_state *state = plane->plane.state;
		unsigned int index = to_rcar_plane_state(state)->hwindex;

		prio -= 4;
		dspr |= (index + 1) << prio;
		hwplanes |= 1 << index;

		if (plane_format(plane)->planes == 2) {
			index = (index + 1) % 8;

			prio -= 4;
			dspr |= (index + 1) << prio;
			hwplanes |= 1 << index;
		}
	}

	/* If VSP+DU integration is enabled the plane assignment is fixed. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
		if (rcdu->info->gen < 3) {
			dspr = (rcrtc->index % 2) + 1;
			hwplanes = 1 << (rcrtc->index % 2);
		} else {
			dspr = (rcrtc->index % 2) ? 3 : 1;
			hwplanes = 1 << ((rcrtc->index % 2) ? 2 : 0);
		}
	}

	/*
	 * Update the planes to display timing and dot clock generator
	 * associations.
	 *
	 * Updating the DPTSR register requires restarting the CRTC group,
	 * resulting in visible flicker. To mitigate the issue only update the
	 * association if needed by enabled planes. Planes being disabled will
	 * keep their current association.
	 */
	mutex_lock(&rcrtc->group->lock);

	dptsr_planes = rcrtc->index % 2 ? rcrtc->group->dptsr_planes | hwplanes
		     : rcrtc->group->dptsr_planes & ~hwplanes;

	if (dptsr_planes != rcrtc->group->dptsr_planes) {
		rcar_du_group_write(rcrtc->group, DPTSR,
				    (dptsr_planes << 16) | dptsr_planes);
		rcrtc->group->dptsr_planes = dptsr_planes;

		if (rcrtc->group->used_crtcs)
			rcar_du_group_restart(rcrtc->group);
	}

	/* Restart the group if plane sources have changed. */
	if (rcrtc->group->need_restart)
		rcar_du_group_restart(rcrtc->group);

	mutex_unlock(&rcrtc->group->lock);

	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
			    dspr);
}
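
/*
 * For example (plane indices chosen purely for illustration): two visible
 * single-planar planes on hardware planes 0 and 2, with normalized zpos 0 and
 * 1 respectively, yield prio = 8 before the second loop and end up with
 * dspr = (0 + 1) << 4 | (2 + 1) << 0 = 0x13 and hwplanes = 0x05. A two-planar
 * format (e.g. NV12) additionally claims the next hardware plane and occupies
 * a second 4-bit field in the DSnPR value.
 */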

/* -----------------------------------------------------------------------------
 * Page Flip
 */

void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct drm_pending_vblank_event *event;
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = rcrtc->event;
	rcrtc->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (event == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_crtc_send_vblank_event(&rcrtc->crtc, event);
	wake_up(&rcrtc->flip_wait);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_crtc_vblank_put(&rcrtc->crtc);
}

static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
{
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;
	bool pending;

	spin_lock_irqsave(&dev->event_lock, flags);
	pending = rcrtc->event != NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return pending;
}

static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	if (wait_event_timeout(rcrtc->flip_wait,
			       !rcar_du_crtc_page_flip_pending(rcrtc),
			       msecs_to_jiffies(50)))
		return;

	dev_warn(rcdu->dev, "page flip timeout\n");

	rcar_du_crtc_finish_page_flip(rcrtc);
}

/* -----------------------------------------------------------------------------
 * Color Management Module (CMM)
 */

static int rcar_du_cmm_check(struct drm_crtc *crtc,
			     struct drm_crtc_state *state)
{
	struct drm_property_blob *drm_lut = state->gamma_lut;
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct device *dev = rcrtc->dev->dev;

	if (!drm_lut)
		return 0;

	/* We only accept fully populated LUT tables. */
	if (drm_color_lut_size(drm_lut) != CM2_LUT_SIZE) {
		dev_err(dev, "invalid gamma lut size: %zu bytes\n",
			drm_lut->length);
		return -EINVAL;
	}

	return 0;
}

static void rcar_du_cmm_setup(struct drm_crtc *crtc)
{
	struct drm_property_blob *drm_lut = crtc->state->gamma_lut;
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_cmm_config cmm_config = {};

	if (!rcrtc->cmm)
		return;

	if (drm_lut)
		cmm_config.lut.table = (struct drm_color_lut *)drm_lut->data;

	rcar_cmm_setup(rcrtc->cmm, &cmm_config);
}

/* -----------------------------------------------------------------------------
 * Start/Stop and Suspend/Resume
 */

static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
{
	/* Set display off and background to black */
	rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
	rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));

	/* Configure display timings and output routing */
	rcar_du_crtc_set_display_timing(rcrtc);
	rcar_du_group_set_routing(rcrtc->group);

	/* Start with all planes disabled. */
	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);

	/* Enable the VSP compositor. */
	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_enable(rcrtc);

	/* Turn vertical blanking interrupt reporting on. */
	drm_crtc_vblank_on(&rcrtc->crtc);
}

static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
{
	int ret;

	/*
	 * Guard against double-get, as the function is called from both the
	 * .atomic_enable() and .atomic_begin() handlers.
	 */
	if (rcrtc->initialized)
		return 0;

	ret = clk_prepare_enable(rcrtc->clock);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(rcrtc->extclock);
	if (ret < 0)
		goto error_clock;

	ret = rcar_du_group_get(rcrtc->group);
	if (ret < 0)
		goto error_group;

	rcar_du_crtc_setup(rcrtc);
	rcrtc->initialized = true;

	return 0;

error_group:
	clk_disable_unprepare(rcrtc->extclock);
error_clock:
	clk_disable_unprepare(rcrtc->clock);
	return ret;
}

static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
{
	rcar_du_group_put(rcrtc->group);

	clk_disable_unprepare(rcrtc->extclock);
	clk_disable_unprepare(rcrtc->clock);

	rcrtc->initialized = false;
}

static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
	bool interlaced;

	/*
	 * Select master sync mode. This enables display operation in master
	 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
	 * actively driven).
	 */
	interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
	rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
				   (interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
				   DSYSR_TVM_MASTER);

	rcar_du_group_start_stop(rcrtc->group, true);
}

static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->dev;
	struct drm_crtc *crtc = &rcrtc->crtc;
	u32 status;

	/* Make sure vblank interrupts are enabled. */
	drm_crtc_vblank_get(crtc);

	/*
	 * Disable planes and calculate how many vertical blanking interrupts we
	 * have to wait for. If a vertical blanking interrupt has been triggered
	 * but not processed yet, we don't know whether it occurred before or
	 * after the planes got disabled. We thus have to wait for two vblank
	 * interrupts in that case.
	 */
	spin_lock_irq(&rcrtc->vblank_lock);
	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
	status = rcar_du_crtc_read(rcrtc, DSSR);
	rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
	spin_unlock_irq(&rcrtc->vblank_lock);

	if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
				msecs_to_jiffies(100)))
		dev_warn(rcdu->dev, "vertical blanking timeout\n");

	drm_crtc_vblank_put(crtc);
}

static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
	struct drm_crtc *crtc = &rcrtc->crtc;

	/*
	 * Disable all planes and wait for the change to take effect. This is
	 * required as the plane enable registers are updated on vblank, and no
	 * vblank will occur once the CRTC is stopped. Disabling planes when
	 * starting the CRTC thus wouldn't be enough as it would start scanning
	 * out immediately from old frame buffers until the next vblank.
	 *
	 * This increases the CRTC stop delay, especially when multiple CRTCs
	 * are stopped in one operation as we now wait for one vblank per CRTC.
	 * Whether this can be improved needs to be researched.
	 */
	rcar_du_crtc_disable_planes(rcrtc);

	/*
	 * Disable vertical blanking interrupt reporting. We first need to wait
	 * for page flip completion before stopping the CRTC as userspace
	 * expects page flips to eventually complete.
	 */
	rcar_du_crtc_wait_page_flip(rcrtc);
	drm_crtc_vblank_off(crtc);

	/* Disable the VSP compositor. */
	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_disable(rcrtc);

	if (rcrtc->cmm)
		rcar_cmm_disable(rcrtc->cmm);

	/*
	 * Select switch sync mode. This stops display operation and configures
	 * the HSYNC and VSYNC signals as inputs.
	 *
	 * TODO: Find another way to stop the display for DUs that don't support
	 * TVM sync.
	 */
	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_TVM_SYNC))
		rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK,
					   DSYSR_TVM_SWITCH);

	rcar_du_group_start_stop(rcrtc->group, false);
}

/* -----------------------------------------------------------------------------
 * CRTC Functions
 */

static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc_state);
	struct drm_encoder *encoder;
	int ret;

	ret = rcar_du_cmm_check(crtc, crtc_state);
	if (ret)
		return ret;

	/* Store the routes from the CRTC output to the DU outputs. */
	rstate->outputs = 0;

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc_state->encoder_mask) {
		struct rcar_du_encoder *renc;

		/* Skip the writeback encoder. */
		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
			continue;

		renc = to_rcar_encoder(encoder);
		rstate->outputs |= BIT(renc->output);
	}

	return 0;
}

static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
	struct rcar_du_device *rcdu = rcrtc->dev;

	if (rcrtc->cmm)
		rcar_cmm_enable(rcrtc->cmm);
	rcar_du_crtc_get(rcrtc);

	/*
	 * On D3/E3 the dot clock is provided by the LVDS encoder attached to
	 * the DU channel. We need to enable its clock output explicitly if
	 * the LVDS output is disabled.
	 */
	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
	    rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
		struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
		const struct drm_display_mode *mode =
			&crtc->state->adjusted_mode;

		rcar_lvds_clk_enable(bridge, mode->clock * 1000);
	}

	rcar_du_crtc_start(rcrtc);

	/*
	 * TODO: The chip manual indicates that CMM tables should be written
	 * after the DU channel has been activated. Investigate the impact
	 * of this restriction on the first displayed frame.
	 */
	rcar_du_cmm_setup(crtc);
}

static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
									 crtc);
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcar_du_crtc_stop(rcrtc);
	rcar_du_crtc_put(rcrtc);

	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
	    rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
		struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];

		/*
		 * Disable the LVDS clock output, see
		 * rcar_du_crtc_atomic_enable().
		 */
		rcar_lvds_clk_disable(bridge);
	}

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}

static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	WARN_ON(!crtc->state->enable);

	/*
	 * If a mode set is in progress we can be called with the CRTC disabled.
	 * We thus need to first get and setup the CRTC in order to configure
	 * planes. We must *not* put the CRTC in .atomic_flush(), as it must be
	 * kept awake until the .atomic_enable() call that will follow. The get
	 * operation in .atomic_enable() will in that case be a no-op, and the
	 * CRTC will be put later in .atomic_disable().
	 *
	 * If a mode set is not in progress the CRTC is enabled, and the
	 * following get call will be a no-op. There is thus no need to balance
	 * it in .atomic_flush() either.
	 */
	rcar_du_crtc_get(rcrtc);

	/* If the active state changed, we let .atomic_enable handle CMM. */
	if (crtc->state->color_mgmt_changed && !crtc->state->active_changed)
		rcar_du_cmm_setup(crtc);

	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_atomic_begin(rcrtc);
}

static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	rcar_du_crtc_update_planes(rcrtc);

	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);
		rcrtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_atomic_flush(rcrtc);
}

static enum drm_mode_status
rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
			const struct drm_display_mode *mode)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_device *rcdu = rcrtc->dev;
	bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
	unsigned int min_sync_porch;
	unsigned int vbp;

	if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
		return MODE_NO_INTERLACE;

	/*
	 * The hardware requires a minimum combined horizontal sync and back
	 * porch of 20 pixels (when CMM isn't used) or 45 pixels (when CMM is
	 * used), and a minimum vertical back porch of 3 lines.
	 */
	min_sync_porch = 20;
	if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2))
		min_sync_porch += 25;

	if (mode->htotal - mode->hsync_start < min_sync_porch)
		return MODE_HBLANK_NARROW;

	vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
	if (vbp < 3)
		return MODE_VBLANK_NARROW;

	return MODE_OK;
}
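
/*
 * For instance, the standard 1920x1080@60 mode used in the timing example
 * above passes these checks easily: its horizontal sync plus back porch is
 * 2200 - 2008 = 192 pixels (well above 20 or 45), and its vertical back porch
 * is 1125 - 1089 = 36 lines (above the 3-line minimum).
 */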

static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
	.atomic_check = rcar_du_crtc_atomic_check,
	.atomic_begin = rcar_du_crtc_atomic_begin,
	.atomic_flush = rcar_du_crtc_atomic_flush,
	.atomic_enable = rcar_du_crtc_atomic_enable,
	.atomic_disable = rcar_du_crtc_atomic_disable,
	.mode_valid = rcar_du_crtc_mode_valid,
};

static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->dev;
	const char **sources;
	unsigned int count;
	int i = -1;

	/* CRC available only on Gen3 HW. */
	if (rcdu->info->gen < 3)
		return;

	/* Reserve 1 for "auto" source. */
	count = rcrtc->vsp->num_planes + 1;

	sources = kmalloc_array(count, sizeof(*sources), GFP_KERNEL);
	if (!sources)
		return;

	sources[0] = kstrdup("auto", GFP_KERNEL);
	if (!sources[0])
		goto error;

	for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
		struct drm_plane *plane = &rcrtc->vsp->planes[i].plane;
		char name[16];

		sprintf(name, "plane%u", plane->base.id);
		sources[i + 1] = kstrdup(name, GFP_KERNEL);
		if (!sources[i + 1])
			goto error;
	}

	rcrtc->sources = sources;
	rcrtc->sources_count = count;
	return;

error:
	while (i >= 0) {
		kfree(sources[i]);
		i--;
	}
	kfree(sources);
}

static void rcar_du_crtc_crc_cleanup(struct rcar_du_crtc *rcrtc)
{
	unsigned int i;

	if (!rcrtc->sources)
		return;

	for (i = 0; i < rcrtc->sources_count; i++)
		kfree(rcrtc->sources[i]);
	kfree(rcrtc->sources);

	rcrtc->sources = NULL;
	rcrtc->sources_count = 0;
}

static struct drm_crtc_state *
rcar_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
	struct rcar_du_crtc_state *state;
	struct rcar_du_crtc_state *copy;

	if (WARN_ON(!crtc->state))
		return NULL;

	state = to_rcar_crtc_state(crtc->state);
	copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
	if (copy == NULL)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &copy->state);

	return &copy->state;
}

static void rcar_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
					      struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_rcar_crtc_state(state));
}

static void rcar_du_crtc_cleanup(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_crc_cleanup(rcrtc);

	return drm_crtc_cleanup(crtc);
}

static void rcar_du_crtc_reset(struct drm_crtc *crtc)
{
	struct rcar_du_crtc_state *state;

	if (crtc->state) {
		rcar_du_crtc_atomic_destroy_state(crtc, crtc->state);
		crtc->state = NULL;
	}

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return;

	state->crc.source = VSP1_DU_CRC_NONE;
	state->crc.index = 0;

	__drm_atomic_helper_crtc_reset(crtc, &state->state);
}

static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
	rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
	rcrtc->vblank_enable = true;

	return 0;
}

static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
	rcrtc->vblank_enable = false;
}

static int rcar_du_crtc_parse_crc_source(struct rcar_du_crtc *rcrtc,
					 const char *source_name,
					 enum vsp1_du_crc_source *source)
{
	unsigned int index;
	int ret;

	/*
	 * Parse the source name. Supported values are "plane%u" to compute the
	 * CRC on an input plane (%u is the plane ID), and "auto" to compute the
	 * CRC on the composer (VSP) output.
	 */

	if (!source_name) {
		*source = VSP1_DU_CRC_NONE;
		return 0;
	} else if (!strcmp(source_name, "auto")) {
		*source = VSP1_DU_CRC_OUTPUT;
		return 0;
	} else if (strstarts(source_name, "plane")) {
		unsigned int i;

		*source = VSP1_DU_CRC_PLANE;

		ret = kstrtouint(source_name + strlen("plane"), 10, &index);
		if (ret < 0)
			return ret;

		for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
			if (index == rcrtc->vsp->planes[i].plane.base.id)
				return i;
		}
	}

	return -EINVAL;
}
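
/*
 * These source names are consumed through the generic DRM CRC debugfs
 * interface. As a usage sketch (the debugfs path and object ID below are
 * examples, not fixed values), capturing CRCs of the composed output of the
 * first CRTC could look like:
 *
 *	# echo auto > /sys/kernel/debug/dri/0/crtc-0/crc/control
 *	# cat /sys/kernel/debug/dri/0/crtc-0/crc/data
 *
 * while "plane<ID>", with <ID> being the DRM object ID of one of the VSP
 * planes, selects that plane as the CRC source instead.
 */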

static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
					  const char *source_name,
					  size_t *values_cnt)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	enum vsp1_du_crc_source source;

	if (rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
		return -EINVAL;
	}

	*values_cnt = 1;
	return 0;
}

static const char *const *
rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	*count = rcrtc->sources_count;
	return rcrtc->sources;
}

static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
				       const char *source_name)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc_state *crtc_state;
	struct drm_atomic_state *state;
	enum vsp1_du_crc_source source;
	unsigned int index;
	int ret;

	ret = rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source);
	if (ret < 0)
		return ret;

	index = ret;

	/* Perform an atomic commit to set the CRC source. */
	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		ret = -ENOMEM;
		goto unlock;
	}

	state->acquire_ctx = &ctx;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (!IS_ERR(crtc_state)) {
		struct rcar_du_crtc_state *rcrtc_state;

		rcrtc_state = to_rcar_crtc_state(crtc_state);
		rcrtc_state->crc.source = source;
		rcrtc_state->crc.index = index;

		ret = drm_atomic_commit(state);
	} else {
		ret = PTR_ERR(crtc_state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

unlock:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static const struct drm_crtc_funcs crtc_funcs_gen2 = {
	.reset = rcar_du_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
	.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
	.enable_vblank = rcar_du_crtc_enable_vblank,
	.disable_vblank = rcar_du_crtc_disable_vblank,
};

static const struct drm_crtc_funcs crtc_funcs_gen3 = {
	.reset = rcar_du_crtc_reset,
	.destroy = rcar_du_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
	.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
	.enable_vblank = rcar_du_crtc_enable_vblank,
	.disable_vblank = rcar_du_crtc_disable_vblank,
	.set_crc_source = rcar_du_crtc_set_crc_source,
	.verify_crc_source = rcar_du_crtc_verify_crc_source,
	.get_crc_sources = rcar_du_crtc_get_crc_sources,
};

/* -----------------------------------------------------------------------------
 * Interrupt Handling
 */

static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
	struct rcar_du_crtc *rcrtc = arg;
	struct rcar_du_device *rcdu = rcrtc->dev;
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	spin_lock(&rcrtc->vblank_lock);

	status = rcar_du_crtc_read(rcrtc, DSSR);
	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);

	if (status & DSSR_VBK) {
		/*
		 * Wake up the vblank wait if the counter reaches 0. This must
		 * be protected by the vblank_lock to avoid races in
		 * rcar_du_crtc_disable_planes().
		 */
		if (rcrtc->vblank_count) {
			if (--rcrtc->vblank_count == 0)
				wake_up(&rcrtc->vblank_wait);
		}
	}

	spin_unlock(&rcrtc->vblank_lock);

	if (status & DSSR_VBK) {
		if (rcdu->info->gen < 3) {
			drm_crtc_handle_vblank(&rcrtc->crtc);
			rcar_du_crtc_finish_page_flip(rcrtc);
		}

		ret = IRQ_HANDLED;
	}

	return ret;
}

/* -----------------------------------------------------------------------------
 * Initialization
 */

int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
			unsigned int hwindex)
{
	static const unsigned int mmio_offsets[] = {
		DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET
	};

	struct rcar_du_device *rcdu = rgrp->dev;
	struct platform_device *pdev = to_platform_device(rcdu->dev);
	struct rcar_du_crtc *rcrtc = &rcdu->crtcs[swindex];
	struct drm_crtc *crtc = &rcrtc->crtc;
	struct drm_plane *primary;
	unsigned int irqflags;
	struct clk *clk;
	char clk_name[9];
	char *name;
	int irq;
	int ret;

	/* Get the CRTC clock and the optional external clock. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_CLOCK)) {
		sprintf(clk_name, "du.%u", hwindex);
		name = clk_name;
	} else {
		name = NULL;
	}

	rcrtc->clock = devm_clk_get(rcdu->dev, name);
	if (IS_ERR(rcrtc->clock)) {
		dev_err(rcdu->dev, "no clock for DU channel %u\n", hwindex);
		return PTR_ERR(rcrtc->clock);
	}

	sprintf(clk_name, "dclkin.%u", hwindex);
	clk = devm_clk_get(rcdu->dev, clk_name);
	if (!IS_ERR(clk)) {
		rcrtc->extclock = clk;
	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
		return -EPROBE_DEFER;
	} else if (rcdu->info->dpll_mask & BIT(hwindex)) {
		/*
		 * DU channels that have a display PLL can't use the internal
		 * system clock and thus require an external clock.
		 */
		ret = PTR_ERR(clk);
		dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
		return ret;
	}

	init_waitqueue_head(&rcrtc->flip_wait);
	init_waitqueue_head(&rcrtc->vblank_wait);
	spin_lock_init(&rcrtc->vblank_lock);

	rcrtc->dev = rcdu;
	rcrtc->group = rgrp;
	rcrtc->mmio_offset = mmio_offsets[hwindex];
	rcrtc->index = hwindex;
	rcrtc->dsysr = rcrtc->index % 2 ? 0 : DSYSR_DRES;

	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_TVM_SYNC))
		rcrtc->dsysr |= DSYSR_TVM_TVSYNC;

	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
		primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
	else
		primary = &rgrp->planes[swindex % 2].plane;

	ret = drm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL,
					rcdu->info->gen <= 2 ?
					&crtc_funcs_gen2 : &crtc_funcs_gen3,
					NULL);
	if (ret < 0)
		return ret;

	/* CMM might be disabled for this CRTC. */
	if (rcdu->cmms[swindex]) {
		rcrtc->cmm = rcdu->cmms[swindex];
		rgrp->cmms_mask |= BIT(hwindex % 2);

		drm_mode_crtc_set_gamma_size(crtc, CM2_LUT_SIZE);
		drm_crtc_enable_color_mgmt(crtc, 0, false, CM2_LUT_SIZE);
	}

	drm_crtc_helper_add(crtc, &crtc_helper_funcs);

	/* Register the interrupt handler. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ)) {
		/* The IRQs are associated with the CRTC (sw)index. */
		irq = platform_get_irq(pdev, swindex);
		irqflags = 0;
	} else {
		irq = platform_get_irq(pdev, 0);
		irqflags = IRQF_SHARED;
	}

	if (irq < 0) {
		dev_err(rcdu->dev, "no IRQ for CRTC %u\n", swindex);
		return irq;
	}

	ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
			       dev_name(rcdu->dev), rcrtc);
	if (ret < 0) {
		dev_err(rcdu->dev,
			"failed to register IRQ for CRTC %u\n", swindex);
		return ret;
	}

	rcar_du_crtc_crc_init(rcrtc);

	return 0;
}