// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
	struct mutex lock;
	struct irq_chip irq_chip;

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;
	struct irq_domain *domain;

	int irq;
	int wake_count;

	void *status_reg_buf;
	unsigned int *main_status_buf;
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
	unsigned int *wake_buf;
	unsigned int *type_buf;
	unsigned int *type_buf_def;
	unsigned int **virt_buf;

	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;

	bool clear_status:1;
};

static int sub_irq_reg(struct regmap_irq_chip_data *data,
		       unsigned int base_reg, int i)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	struct regmap_irq_sub_irq_map *subreg;
	unsigned int offset;
	int reg = 0;

	if (!chip->sub_reg_offsets || !chip->not_fixed_stride) {
		/* Assume linear mapping */
		reg = base_reg + (i * map->reg_stride * data->irq_reg_stride);
	} else {
		subreg = &chip->sub_reg_offsets[i];
		offset = subreg->offset[0];
		reg = base_reg + offset;
	}

	return reg;
}

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
				  unsigned int reg, unsigned int mask,
				  unsigned int val)
{
	if (d->chip->mask_writeonly)
		return regmap_write_bits(d->map, reg, mask, val);
	else
		return regmap_update_bits(d->map, reg, mask, val);
}

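/*
 * The irq_chip callbacks below only update the buffers cached in
 * struct regmap_irq_chip_data.  regmap_irq_lock() and
 * regmap_irq_sync_unlock() are installed as the irq_bus_lock and
 * irq_bus_sync_unlock operations, so the genirq core brackets any
 * batch of enable/disable/set_type/set_wake calls with them and all
 * of the (possibly sleeping) bus writes happen in one place, below.
 */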
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, j, ret;
	u32 reg;
	u32 unmask_offset;
	u32 val;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	if (d->clear_status) {
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = sub_irq_reg(d, d->chip->status_base, i);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = sub_irq_reg(d, d->chip->mask_base, i);
		if (d->chip->mask_invert) {
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = sub_irq_reg(d, d->chip->wake_base, i);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
						d->mask_buf_def[i],
						d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally: a masked
		 * interrupt that was never acked would be ignored by the
		 * interrupt handler and could trigger an IRQ storm.
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = sub_irq_reg(d, d->chip->ack_base, i);

			/* some chips ack by writing 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (d->chip->clear_ack) {
				if (d->chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = sub_irq_reg(d, d->chip->type_base, i);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->num_virt_regs) {
		for (i = 0; i < d->chip->num_virt_regs; i++) {
			for (j = 0; j < d->chip->num_regs; j++) {
				reg = sub_irq_reg(d, d->chip->virt_reg_base[i],
						  j);
				ret = regmap_write(map, reg, d->virt_buf[i][j]);
				if (ret != 0)
					dev_err(d->map->dev,
						"Failed to write virt 0x%x: %d\n",
						reg, ret);
			}
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int reg = irq_data->reg_offset / map->reg_stride;
	unsigned int mask, type;

	type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for rising and falling edge interrupts, but
	 * we want to make them into a single virtual interrupt with
	 * configurable edge.
	 *
	 * If the interrupt we're enabling defines the falling or rising
	 * masks then instead of using the regular mask bits for this
	 * interrupt, use the value previously written to the type buffer
	 * at the corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && type)
		mask = d->type_buf[reg] & irq_data->mask;
	else
		mask = irq_data->mask;

	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[reg] &= ~mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg;
	const struct regmap_irq_type *t = &irq_data->type;

	if ((t->types_supported & type) != type)
		return 0;

	reg = t->type_reg_offset / map->reg_stride;

	if (t->type_reg_mask)
		d->type_buf[reg] &= ~t->type_reg_mask;
	else
		d->type_buf[reg] &= ~(t->type_falling_val |
				      t->type_rising_val |
				      t->type_level_low_val |
				      t->type_level_high_val);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= t->type_falling_val;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= t->type_rising_val;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (t->type_falling_val |
				     t->type_rising_val);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		d->type_buf[reg] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		d->type_buf[reg] |= t->type_level_low_val;
		break;

	default:
		return -EINVAL;
	}

	if (d->chip->set_type_virt)
		return d->chip->set_type_virt(d->virt_buf, type, data->hwirq,
					      reg);

	return 0;
}

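/*
 * A hedged sketch of how a chip description might exercise the type
 * handling above; the register layout and values are hypothetical.  A
 * line whose trigger is selected by bit 0 of a type register at offset
 * 0x1 (1 = falling edge, 0 = rising edge) could be described as:
 *
 *	static const struct regmap_irq foo_irqs[] = {
 *		[0] = {
 *			.reg_offset = 0x0,
 *			.mask = BIT(0),
 *			.type = {
 *				.type_reg_offset = 0x1,
 *				.type_falling_val = BIT(0),
 *				.type_rising_val = 0,
 *				.types_supported = IRQ_TYPE_EDGE_RISING |
 *						   IRQ_TYPE_EDGE_FALLING,
 *			},
 *		},
 *	};
 *
 * regmap_irq_set_type() then clears the union of the type values for
 * the line and ORs in the value matching the requested trigger.
 */
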
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
				    unsigned int b)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	struct regmap_irq_sub_irq_map *subreg;
	int i, ret = 0;

	if (!chip->sub_reg_offsets) {
		/* Assume linear mapping */
		ret = regmap_read(map, chip->status_base +
				  (b * map->reg_stride * data->irq_reg_stride),
				  &data->status_buf[b]);
	} else {
		subreg = &chip->sub_reg_offsets[b];
		for (i = 0; i < subreg->num_regs; i++) {
			unsigned int offset = subreg->offset[i];
			unsigned int index = offset / map->reg_stride;

			if (chip->not_fixed_stride)
				ret = regmap_read(map,
						  chip->status_base + offset,
						  &data->status_buf[b]);
			else
				ret = regmap_read(map,
						  chip->status_base + offset,
						  &data->status_buf[index]);

			if (ret)
				break;
		}
	}
	return ret;
}

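/*
 * Hierarchical chips set chip->num_main_regs: regmap_irq_thread() below
 * then reads the main status register(s) first and, for each bit set
 * there, pulls in only the corresponding sub status register via
 * read_sub_irq_data() above.  For example (a hypothetical linear
 * layout), if only bit 2 of the main status is set, a single read of
 * status_base + 2 * reg_stride * irq_reg_stride replaces reading all
 * num_regs status registers.
 */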
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			goto exit;
		}
	}

	/*
	 * Read only the registers with active IRQs if the chip has a 'main
	 * status register'.  Otherwise read in all the statuses, using a
	 * single bulk read if possible in order to reduce the I/O overhead.
	 */

	if (chip->num_main_regs) {
		unsigned int max_main_bits;
		unsigned long size;

		size = chip->num_regs * sizeof(unsigned int);

		max_main_bits = (chip->num_main_status_bits) ?
				 chip->num_main_status_bits : chip->num_regs;
		/* Clear the status buf as we don't read all status regs */
		memset(data->status_buf, 0, size);

		/*
		 * We could support bulk reads for the main status registers,
		 * but devices with very many of them are not expected, so
		 * only single reads are supported for the sake of simplicity;
		 * bulk reads can be added if the need arises.
		 */
		for (i = 0; i < chip->num_main_regs; i++) {
			ret = regmap_read(map, chip->main_status +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->main_status_buf[i]);
			if (ret) {
				dev_err(map->dev,
					"Failed to read IRQ status %d\n",
					ret);
				goto exit;
			}
		}

		/* Read sub registers with active IRQs */
		for (i = 0; i < chip->num_main_regs; i++) {
			unsigned int b;
			const unsigned long mreg = data->main_status_buf[i];

			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
				if (i * map->format.val_bytes * 8 + b >
				    max_main_bits)
					break;
				ret = read_sub_irq_data(data, b);

				if (ret != 0) {
					dev_err(map->dev,
						"Failed to read IRQ status %d\n",
						ret);
					goto exit;
				}
			}

		}
	} else if (!map->use_single_read && map->reg_stride == 1 &&
		   data->irq_reg_stride == 1) {

		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			unsigned int reg = sub_irq_reg(data,
					data->chip->status_base, i);
			ret = regmap_read(map, reg, &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				goto exit;
			}
		}
	}

	if (chip->status_invert)
		for (i = 0; i < data->chip->num_regs; i++)
			data->status_buf[i] = ~data->status_buf[i];

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = sub_irq_reg(data, data->chip->ack_base, i);

			if (chip->ack_invert)
				ret = regmap_write(map, reg,
						   ~data->status_buf[i]);
			else
				ret = regmap_write(map, reg,
						   data->status_buf[i]);
			if (chip->clear_ack) {
				if (chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

exit:
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

/**
 * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
 *
 * @fwnode: The firmware node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
			       struct regmap *map, int irq,
			       int irq_flags, int irq_base,
			       const struct regmap_irq_chip *chip,
			       struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	int num_type_reg;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (chip->not_fixed_stride) {
		for (i = 0; i < chip->num_regs; i++)
			if (chip->sub_reg_offsets[i].num_regs != 1)
				return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	if (chip->num_main_regs) {
		d->main_status_buf = kcalloc(chip->num_main_regs,
					     sizeof(unsigned int),
					     GFP_KERNEL);

		if (!d->main_status_buf)
			goto err_alloc;
	}

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
	if (num_type_reg) {
		d->type_buf_def = kcalloc(num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	if (chip->num_virt_regs) {
		/*
		 * Create virt_buf[chip->num_virt_regs][chip->num_regs]
		 */
		d->virt_buf = kcalloc(chip->num_virt_regs, sizeof(*d->virt_buf),
				      GFP_KERNEL);
		if (!d->virt_buf)
			goto err_alloc;

		for (i = 0; i < chip->num_virt_regs; i++) {
			d->virt_buf[i] = kcalloc(chip->num_regs,
						 sizeof(unsigned int),
						 GFP_KERNEL);
			if (!d->virt_buf[i])
				goto err_alloc;
		}
	}

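	/*
	 * The buffers allocated above shadow one unsigned int per status
	 * register: status_buf holds the latched status, mask_buf the mask
	 * to be programmed, mask_buf_def the union of every IRQ's mask bits
	 * in that register, and wake_buf/type_buf(_def) the wake and
	 * trigger-type state where the chip supports them.
	 */
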
	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		if (!chip->mask_base)
			continue;

		reg = sub_irq_reg(d, d->chip->mask_base, i);

		if (chip->mask_invert)
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = sub_irq_reg(d, d->chip->status_base, i);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (chip->status_invert)
			d->status_buf[i] = ~d->status_buf[i];

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = sub_irq_reg(d, d->chip->ack_base, i);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (chip->clear_ack) {
				if (chip->ack_invert && !ret)
					ret = regmap_write(map, reg, UINT_MAX);
				else if (!ret)
					ret = regmap_write(map, reg, 0);
			}
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = sub_irq_reg(d, d->chip->wake_base, i);

			if (chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							     d->mask_buf_def[i],
							     0);
			else
				ret = regmap_irq_update_bits(d, reg,
							     d->mask_buf_def[i],
							     d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (chip->num_type_reg && !chip->type_in_mask) {
		for (i = 0; i < chip->num_type_reg; ++i) {
			reg = sub_irq_reg(d, d->chip->type_base, i);

			ret = regmap_read(map, reg, &d->type_buf_def[i]);
			if (ret) {
				dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}

			if (d->chip->type_invert)
				d->type_buf_def[i] = ~d->type_buf_def[i];
		}
	}

	if (irq_base)
		d->domain = irq_domain_create_legacy(fwnode, chip->num_irqs,
						     irq_base, 0,
						     &regmap_domain_ops, d);
	else
		d->domain = irq_domain_create_linear(fwnode, chip->num_irqs,
						     &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	if (d->virt_buf) {
		for (i = 0; i < chip->num_virt_regs; i++)
			kfree(d->virt_buf[i]);
		kfree(d->virt_buf);
	}
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);

/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
 * node of the regmap is used.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
					  irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

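/*
 * Typical usage, as a hedged sketch (the chip layout, register names
 * and IRQ indices are hypothetical):
 *
 *	static const struct regmap_irq foo_irqs[] = {
 *		REGMAP_IRQ_REG(FOO_IRQ_ALARM, 0, BIT(0)),
 *		REGMAP_IRQ_REG(FOO_IRQ_DONE, 0, BIT(1)),
 *	};
 *
 *	static const struct regmap_irq_chip foo_irq_chip = {
 *		.name = "foo",
 *		.status_base = FOO_REG_IRQ_STATUS,
 *		.mask_base = FOO_REG_IRQ_MASK,
 *		.ack_base = FOO_REG_IRQ_ACK,
 *		.num_regs = 1,
 *		.irqs = foo_irqs,
 *		.num_irqs = ARRAY_SIZE(foo_irqs),
 *	};
 *
 *	ret = regmap_add_irq_chip(map, i2c->irq, IRQF_TRIGGER_LOW, 0,
 *				  &foo_irq_chip, &irq_data);
 *
 * after which individual interrupts can be requested against the Linux
 * IRQ number returned by regmap_irq_get_virq(irq_data, FOO_IRQ_ALARM).
 */
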
/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device.
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose of all the virtual IRQs in the domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Skip the hwirq if it is a hole in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual IRQ of the hwirq on the chip and,
		 * if it is mapped, dispose of it.
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}

static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}
	return *r == data;
}

/**
 * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
 *
 * @dev: The device to which the irq_chip belongs.
 * @fwnode: The firmware node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device
 * is unbound.
 */
int devm_regmap_add_irq_chip_fwnode(struct device *dev,
				    struct fwnode_handle *fwnode,
				    struct regmap *map, int irq,
				    int irq_flags, int irq_base,
				    const struct regmap_irq_chip *chip,
				    struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
					 chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);

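/*
 * With the devm_ variants a driver normally does not pair the call with
 * an explicit regmap_del_irq_chip().  For example (a hedged sketch, the
 * names reusing the hypothetical foo_irq_chip above):
 *
 *	ret = devm_regmap_add_irq_chip(&i2c->dev, map, i2c->irq,
 *				       IRQF_TRIGGER_LOW, 0, &foo_irq_chip,
 *				       &irq_data);
 *
 * and the chip is torn down automatically when &i2c->dev is unbound.
 */
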
/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device to which the irq_chip belongs.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device
 * is unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
					       irq, irq_flags, irq_base, chip,
					       data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);

	if (rc != 0)
		WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);