dpu_hw_interrupts.c (14306B)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_SSPP_TOP0_OFF		0x0
#define MDP_INTF_0_OFF			0x6A000
#define MDP_INTF_1_OFF			0x6A800
#define MDP_INTF_2_OFF			0x6B000
#define MDP_INTF_3_OFF			0x6B800
#define MDP_INTF_4_OFF			0x6C000
#define MDP_INTF_5_OFF			0x6C800
#define MDP_AD4_0_OFF			0x7C000
#define MDP_AD4_1_OFF			0x7D000
#define MDP_AD4_INTR_EN_OFF		0x41c
#define MDP_AD4_INTR_CLEAR_OFF		0x424
#define MDP_AD4_INTR_STATUS_OFF		0x420
#define MDP_INTF_0_OFF_REV_7xxx		0x34000
#define MDP_INTF_1_OFF_REV_7xxx		0x35000
#define MDP_INTF_2_OFF_REV_7xxx		0x36000
#define MDP_INTF_3_OFF_REV_7xxx		0x37000
#define MDP_INTF_4_OFF_REV_7xxx		0x38000
#define MDP_INTF_5_OFF_REV_7xxx		0x39000

/**
 * struct dpu_intr_reg - array of DPU register sets
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set - List of DPU interrupt registers
 *
 * When making changes be sure to sync with dpu_hw_intr_reg
 */
static const struct dpu_intr_reg dpu_intr_set[] = {
	[MDP_SSPP_TOP0_INTR] = {
		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
		MDP_SSPP_TOP0_OFF+INTR_EN,
		MDP_SSPP_TOP0_OFF+INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
		MDP_SSPP_TOP0_OFF+INTR2_EN,
		MDP_SSPP_TOP0_OFF+INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
		MDP_INTF_0_OFF+INTF_INTR_EN,
		MDP_INTF_0_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
		MDP_INTF_1_OFF+INTF_INTR_EN,
		MDP_INTF_1_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
		MDP_INTF_2_OFF+INTF_INTR_EN,
		MDP_INTF_2_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
		MDP_INTF_3_OFF+INTF_INTR_EN,
		MDP_INTF_3_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
		MDP_INTF_4_OFF+INTF_INTR_EN,
		MDP_INTF_4_OFF+INTF_INTR_STATUS
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_5_OFF+INTF_INTR_CLEAR,
		MDP_INTF_5_OFF+INTF_INTR_EN,
		MDP_INTF_5_OFF+INTF_INTR_STATUS
	},
	[MDP_AD4_0_INTR] = {
		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	[MDP_AD4_1_INTR] = {
		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
	},
	[MDP_INTF0_7xxx_INTR] = {
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF1_7xxx_INTR] = {
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF2_7xxx_INTR] = {
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF3_7xxx_INTR] = {
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF4_7xxx_INTR] = {
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
	},
	[MDP_INTF5_7xxx_INTR] = {
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
		MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
	},
};

#define DPU_IRQ_REG(irq_idx)	(irq_idx / 32)
#define DPU_IRQ_MASK(irq_idx)	(BIT(irq_idx % 32))

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms:		Pointer to DPU's KMS structure
 * @irq_idx:		interrupt index
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
	VERB("irq_idx=%d\n", irq_idx);

	/* Nothing to dispatch if no callback was registered for this index */
	if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb) {
		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
		return;
	}

	atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);

	/*
	 * Perform the registered function callback
	 */
	dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
}

irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
					irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Search through matching intr status.
		 */
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * When the callback finishes, clear the irq_status
			 * bit with the matching mask. Once irq_status
			 * is all cleared, the search can be stopped.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}

static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock and it's the caller's responsibility to ensure that's
	 * held.
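	 * Callers in this file (dpu_core_irq_register_callback() and
	 * dpu_core_irq_unregister_callback()) take irq_lock before calling in.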
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "DPU IRQ already set:";
	} else {
		dbgstr = "DPU IRQ enabled:";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enable interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock and it's the caller's responsibility to ensure that's
	 * held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &dpu_intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "DPU IRQ is already cleared:";
	} else {
		dbgstr = "DPU IRQ mask disable:";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();
}

static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					dpu_intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();
}

u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx < 0) {
		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
				__builtin_return_address(0), irq_idx);
		return 0;
	}

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
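	/* Read the raw status bit for this interrupt and clear it if it fired */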

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
			dpu_intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status)
		DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

static void __intr_offset(struct dpu_mdss_cfg *m,
		void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
	hw->base_off = addr;
	hw->blk_off = m->mdp[0].base;
	hw->hwversion = m->hwversion;
}

struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
		struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;
	int nirq = MDP_INTR_MAX * 32;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	__intr_offset(m, addr, &intr->hw);

	intr->total_irqs = nirq;

	intr->irq_mask = m->mdss_irqs;

	spin_lock_init(&intr->irq_lock);

	return intr;
}

void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
	kfree(intr);
}

int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
		void (*irq_cb)(void *arg, int irq_idx),
		void *irq_arg)
{
	unsigned long irq_flags;
	int ret;

	if (!irq_cb) {
		DPU_ERROR("invalid irq_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);

	if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		return -EBUSY;
	}

	trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;

	ret = dpu_hw_intr_enable_irq_locked(
			dpu_kms->hw_intr,
			irq_idx);
	if (ret)
		DPU_ERROR("Failed to enable IRQ for irq_idx:%d\n",
				irq_idx);
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_register_success(irq_idx);

	return 0;
}

int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
	unsigned long irq_flags;
	int ret;

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx);

	ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
	if (ret)
		DPU_ERROR("Failed to disable IRQ for irq_idx:%d: %d\n",
				irq_idx, ret);

	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;

	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_unregister_success(irq_idx);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	unsigned long irq_flags;
	int i, irq_count;
	void *cb;

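	/* Snapshot count and callback for each entry under the IRQ lock */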
	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
		cb = dpu_kms->hw_intr->irq_tbl[i].cb;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb)
			seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
		&dpu_debugfs_core_irq_fops);
}
#endif

void dpu_core_irq_preinstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
		atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
}

void dpu_core_irq_uninstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	if (!dpu_kms->hw_intr)
		return;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
		if (dpu_kms->hw_intr->irq_tbl[i].cb)
			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
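For reference, a minimal sketch of how a consumer of this file typically drives the callback API is shown below. It is not part of dpu_hw_interrupts.c: the names dpu_example_vblank_cb, dpu_example_usage and priv are hypothetical, and a real caller (dpu_encoder/dpu_crtc code) obtains irq_idx from its hardware lookup tables, which are not shown here.

/* Hypothetical consumer sketch: register a callback for one interrupt index,
 * let dpu_core_irq() dispatch it when the interrupt fires, then read/clear
 * the raw status and unregister the callback again.
 */
static void dpu_example_vblank_cb(void *arg, int irq_idx)
{
	/* arg is whatever was passed as irq_arg at registration time */
}

static void dpu_example_usage(struct dpu_kms *dpu_kms, int irq_idx, void *priv)
{
	if (dpu_core_irq_register_callback(dpu_kms, irq_idx,
					   dpu_example_vblank_cb, priv))
		return;

	/* ... interrupt fires, dpu_core_irq() calls dpu_example_vblank_cb ... */

	dpu_core_irq_read(dpu_kms, irq_idx);	/* raw status, cleared if set */
	dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
}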