hisi_uncore_hha_pmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC HHA uncore Hardware event counters support
 *
 * Copyright (C) 2017 HiSilicon Limited
 * Author: Shaokun Zhang <zhangshaokun@hisilicon.com>
 *         Anurup M <anurup.m@huawei.com>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/smp.h>

#include "hisi_uncore_pmu.h"

/* HHA register definition */
#define HHA_INT_MASK		0x0804
#define HHA_INT_STATUS		0x0808
#define HHA_INT_CLEAR		0x080C
#define HHA_VERSION		0x1cf0
#define HHA_PERF_CTRL		0x1E00
#define HHA_EVENT_CTRL		0x1E04
#define HHA_SRCID_CTRL		0x1E08
#define HHA_DATSRC_CTRL		0x1BF0
#define HHA_EVENT_TYPE0		0x1E80
/*
 * If the HW version only supports a 48-bit counter, then
 * bits [63:48] are reserved, which are Read-As-Zero and
 * Writes-Ignored.
 */
#define HHA_CNT0_LOWER		0x1F00

/* HHA PMU v1 has 16 counters and v2 only has 8 counters */
#define HHA_V1_NR_COUNTERS	0x10
#define HHA_V2_NR_COUNTERS	0x8

#define HHA_PERF_CTRL_EN	0x1
#define HHA_TRACETAG_EN		BIT(31)
#define HHA_SRCID_EN		BIT(2)
#define HHA_SRCID_CMD_SHIFT	6
#define HHA_SRCID_MSK_SHIFT	20
#define HHA_SRCID_CMD		GENMASK(16, 6)
#define HHA_SRCID_MSK		GENMASK(30, 20)
#define HHA_DATSRC_SKT_EN	BIT(23)
#define HHA_EVTYPE_NONE		0xff
#define HHA_V1_NR_EVENT		0x65
#define HHA_V2_NR_EVENT		0xCE

HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_cmd, config1, 10, 0);
HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_msk, config1, 21, 11);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tracetag_en, config1, 22, 22);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 23, 23);

static void hisi_hha_pmu_enable_tracetag(struct perf_event *event)
{
        struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
        u32 tt_en = hisi_get_tracetag_en(event);

        if (tt_en) {
                u32 val;

                val = readl(hha_pmu->base + HHA_SRCID_CTRL);
                val |= HHA_TRACETAG_EN;
                writel(val, hha_pmu->base + HHA_SRCID_CTRL);
        }
}

static void hisi_hha_pmu_clear_tracetag(struct perf_event *event)
{
        struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
        u32 val;

        val = readl(hha_pmu->base + HHA_SRCID_CTRL);
        val &= ~HHA_TRACETAG_EN;
        writel(val, hha_pmu->base + HHA_SRCID_CTRL);
}

static void hisi_hha_pmu_config_ds(struct perf_event *event)
{
        struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
        u32 ds_skt = hisi_get_datasrc_skt(event);

        if (ds_skt) {
                u32 val;

                val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
                val |= HHA_DATSRC_SKT_EN;
                writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
        }
}

static void hisi_hha_pmu_clear_ds(struct perf_event *event)
{
        struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
        u32 ds_skt = hisi_get_datasrc_skt(event);

        if (ds_skt) {
                u32 val;

                val = readl(hha_pmu->base + HHA_DATSRC_CTRL);
                val &= ~HHA_DATSRC_SKT_EN;
                writel(val, hha_pmu->base + HHA_DATSRC_CTRL);
        }
}

static void hisi_hha_pmu_config_srcid(struct perf_event *event)
{
        struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
        u32 cmd = hisi_get_srcid_cmd(event);

        if (cmd) {
                u32 val, msk;

                msk = hisi_get_srcid_msk(event);
                val = readl(hha_pmu->base + HHA_SRCID_CTRL);
                val |= HHA_SRCID_EN | (cmd << HHA_SRCID_CMD_SHIFT) |
                       (msk << HHA_SRCID_MSK_SHIFT);
                writel(val, hha_pmu->base + HHA_SRCID_CTRL);
        }
}
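
/*
 * Userspace selects the filters above through perf_event_attr::config1,
 * as decoded by the HISI_PMU_EVENT_ATTR_EXTRACTOR() macros and advertised
 * by the v2 "format" attributes below: srcid_cmd is config1[10:0],
 * srcid_msk is config1[21:11], tracetag_en is config1[22] and datasrc_skt
 * is config1[23]. A non-zero srcid_cmd enables source-ID filtering; the
 * exact command/mask match semantics are defined by the HHA hardware.
 */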

static void hisi_hha_pmu_disable_srcid(struct perf_event *event)
{
        struct hisi_pmu *hha_pmu = to_hisi_pmu(event->pmu);
        u32 cmd = hisi_get_srcid_cmd(event);

        if (cmd) {
                u32 val;

                val = readl(hha_pmu->base + HHA_SRCID_CTRL);
                val &= ~(HHA_SRCID_EN | HHA_SRCID_MSK | HHA_SRCID_CMD);
                writel(val, hha_pmu->base + HHA_SRCID_CTRL);
        }
}

static void hisi_hha_pmu_enable_filter(struct perf_event *event)
{
        if (event->attr.config1 != 0x0) {
                hisi_hha_pmu_enable_tracetag(event);
                hisi_hha_pmu_config_ds(event);
                hisi_hha_pmu_config_srcid(event);
        }
}

static void hisi_hha_pmu_disable_filter(struct perf_event *event)
{
        if (event->attr.config1 != 0x0) {
                hisi_hha_pmu_disable_srcid(event);
                hisi_hha_pmu_clear_ds(event);
                hisi_hha_pmu_clear_tracetag(event);
        }
}

/*
 * Select the counter register offset from the counter index;
 * each counter occupies a 64-bit register, so consecutive
 * counters are 8 bytes apart.
 */
static u32 hisi_hha_pmu_get_counter_offset(int cntr_idx)
{
        return (HHA_CNT0_LOWER + (cntr_idx * 8));
}

static u64 hisi_hha_pmu_read_counter(struct hisi_pmu *hha_pmu,
                                     struct hw_perf_event *hwc)
{
        /* Read 64 bits and like L3C, top 16 bits are RAZ */
        return readq(hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx));
}

static void hisi_hha_pmu_write_counter(struct hisi_pmu *hha_pmu,
                                       struct hw_perf_event *hwc, u64 val)
{
        /* Write 64 bits and like L3C, top 16 bits are WI */
        writeq(val, hha_pmu->base + hisi_hha_pmu_get_counter_offset(hwc->idx));
}

static void hisi_hha_pmu_write_evtype(struct hisi_pmu *hha_pmu, int idx,
                                      u32 type)
{
        u32 reg, reg_idx, shift, val;

        /*
         * Select the appropriate event select register (HHA_EVENT_TYPEx).
         * There are 4 event select registers for the 16 hardware counters.
         * Event code is 8 bits and for the first 4 hardware counters,
         * HHA_EVENT_TYPE0 is chosen. For the next 4 hardware counters,
         * HHA_EVENT_TYPE1 is chosen and so on.
         */
        reg = HHA_EVENT_TYPE0 + 4 * (idx / 4);
        reg_idx = idx % 4;
        shift = 8 * reg_idx;

        /* Write event code to HHA_EVENT_TYPEx register */
        val = readl(hha_pmu->base + reg);
        val &= ~(HHA_EVTYPE_NONE << shift);
        val |= (type << shift);
        writel(val, hha_pmu->base + reg);
}

static void hisi_hha_pmu_start_counters(struct hisi_pmu *hha_pmu)
{
        u32 val;

        /*
         * Set perf_enable bit in HHA_PERF_CTRL to start event
         * counting for all enabled counters.
         */
        val = readl(hha_pmu->base + HHA_PERF_CTRL);
        val |= HHA_PERF_CTRL_EN;
        writel(val, hha_pmu->base + HHA_PERF_CTRL);
}

static void hisi_hha_pmu_stop_counters(struct hisi_pmu *hha_pmu)
{
        u32 val;

        /*
         * Clear perf_enable bit in HHA_PERF_CTRL to stop event
         * counting for all enabled counters.
         */
        val = readl(hha_pmu->base + HHA_PERF_CTRL);
        val &= ~(HHA_PERF_CTRL_EN);
        writel(val, hha_pmu->base + HHA_PERF_CTRL);
}

static void hisi_hha_pmu_enable_counter(struct hisi_pmu *hha_pmu,
                                        struct hw_perf_event *hwc)
{
        u32 val;

        /* Enable counter index in HHA_EVENT_CTRL register */
        val = readl(hha_pmu->base + HHA_EVENT_CTRL);
        val |= (1 << hwc->idx);
        writel(val, hha_pmu->base + HHA_EVENT_CTRL);
}

static void hisi_hha_pmu_disable_counter(struct hisi_pmu *hha_pmu,
                                         struct hw_perf_event *hwc)
{
        u32 val;

        /* Clear counter index in HHA_EVENT_CTRL register */
        val = readl(hha_pmu->base + HHA_EVENT_CTRL);
        val &= ~(1 << hwc->idx);
        writel(val, hha_pmu->base + HHA_EVENT_CTRL);
}

static void hisi_hha_pmu_enable_counter_int(struct hisi_pmu *hha_pmu,
                                            struct hw_perf_event *hwc)
{
        u32 val;

        /* Write 0 to enable interrupt */
        val = readl(hha_pmu->base + HHA_INT_MASK);
        val &= ~(1 << hwc->idx);
        writel(val, hha_pmu->base + HHA_INT_MASK);
}

static void hisi_hha_pmu_disable_counter_int(struct hisi_pmu *hha_pmu,
                                             struct hw_perf_event *hwc)
{
        u32 val;

        /* Write 1 to mask interrupt */
        val = readl(hha_pmu->base + HHA_INT_MASK);
        val |= (1 << hwc->idx);
        writel(val, hha_pmu->base + HHA_INT_MASK);
}

static u32 hisi_hha_pmu_get_int_status(struct hisi_pmu *hha_pmu)
{
        return readl(hha_pmu->base + HHA_INT_STATUS);
}

static void hisi_hha_pmu_clear_int_status(struct hisi_pmu *hha_pmu, int idx)
{
        writel(1 << idx, hha_pmu->base + HHA_INT_CLEAR);
}

static const struct acpi_device_id hisi_hha_pmu_acpi_match[] = {
        { "HISI0243", },
        { "HISI0244", },
        {}
};
MODULE_DEVICE_TABLE(acpi, hisi_hha_pmu_acpi_match);

static int hisi_hha_pmu_init_data(struct platform_device *pdev,
                                  struct hisi_pmu *hha_pmu)
{
        unsigned long long id;
        acpi_status status;

        /*
         * Use SCCL_ID and UID to identify the HHA PMU, while
         * SCCL_ID is in MPIDR[aff2].
         */
        if (device_property_read_u32(&pdev->dev, "hisilicon,scl-id",
                                     &hha_pmu->sccl_id)) {
                dev_err(&pdev->dev, "Can not read hha sccl-id!\n");
                return -EINVAL;
        }

        /*
         * Early BIOS versions exposed the index only through _UID by
         * mistake, so support both: prefer the "hisilicon,idx-id"
         * property when it is available and fall back to _UID otherwise.
         */
        if (device_property_read_u32(&pdev->dev, "hisilicon,idx-id",
                                     &hha_pmu->index_id)) {
                status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
                                               "_UID", NULL, &id);
                if (ACPI_FAILURE(status)) {
                        dev_err(&pdev->dev, "Cannot read idx-id!\n");
                        return -EINVAL;
                }

                hha_pmu->index_id = id;
        }
        /* HHA PMUs only share the same SCCL */
        hha_pmu->ccl_id = -1;

        hha_pmu->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hha_pmu->base)) {
                dev_err(&pdev->dev, "ioremap failed for hha_pmu resource\n");
                return PTR_ERR(hha_pmu->base);
        }

        hha_pmu->identifier = readl(hha_pmu->base + HHA_VERSION);

        return 0;
}

static struct attribute *hisi_hha_pmu_v1_format_attr[] = {
        HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
        NULL,
};

static const struct attribute_group hisi_hha_pmu_v1_format_group = {
        .name = "format",
        .attrs = hisi_hha_pmu_v1_format_attr,
};

static struct attribute *hisi_hha_pmu_v2_format_attr[] = {
        HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
        HISI_PMU_FORMAT_ATTR(srcid_cmd, "config1:0-10"),
        HISI_PMU_FORMAT_ATTR(srcid_msk, "config1:11-21"),
        HISI_PMU_FORMAT_ATTR(tracetag_en, "config1:22"),
        HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:23"),
        NULL
};

static const struct attribute_group hisi_hha_pmu_v2_format_group = {
        .name = "format",
        .attrs = hisi_hha_pmu_v2_format_attr,
};

static struct attribute *hisi_hha_pmu_v1_events_attr[] = {
        HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00),
        HISI_PMU_EVENT_ATTR(rx_outer, 0x01),
        HISI_PMU_EVENT_ATTR(rx_sccl, 0x02),
        HISI_PMU_EVENT_ATTR(rx_ccix, 0x03),
        HISI_PMU_EVENT_ATTR(rx_wbi, 0x04),
        HISI_PMU_EVENT_ATTR(rx_wbip, 0x05),
        HISI_PMU_EVENT_ATTR(rx_wtistash, 0x11),
        HISI_PMU_EVENT_ATTR(rd_ddr_64b, 0x1c),
        HISI_PMU_EVENT_ATTR(wr_ddr_64b, 0x1d),
        HISI_PMU_EVENT_ATTR(rd_ddr_128b, 0x1e),
        HISI_PMU_EVENT_ATTR(wr_ddr_128b, 0x1f),
        HISI_PMU_EVENT_ATTR(spill_num, 0x20),
        HISI_PMU_EVENT_ATTR(spill_success, 0x21),
        HISI_PMU_EVENT_ATTR(bi_num, 0x23),
        HISI_PMU_EVENT_ATTR(mediated_num, 0x32),
        HISI_PMU_EVENT_ATTR(tx_snp_num, 0x33),
        HISI_PMU_EVENT_ATTR(tx_snp_outer, 0x34),
        HISI_PMU_EVENT_ATTR(tx_snp_ccix, 0x35),
        HISI_PMU_EVENT_ATTR(rx_snprspdata, 0x38),
        HISI_PMU_EVENT_ATTR(rx_snprsp_outer, 0x3c),
        HISI_PMU_EVENT_ATTR(sdir-lookup, 0x40),
        HISI_PMU_EVENT_ATTR(edir-lookup, 0x41),
        HISI_PMU_EVENT_ATTR(sdir-hit, 0x42),
        HISI_PMU_EVENT_ATTR(edir-hit, 0x43),
        HISI_PMU_EVENT_ATTR(sdir-home-migrate, 0x4c),
        HISI_PMU_EVENT_ATTR(edir-home-migrate, 0x4d),
        NULL,
};

static const struct attribute_group hisi_hha_pmu_v1_events_group = {
        .name = "events",
        .attrs = hisi_hha_pmu_v1_events_attr,
};

static struct attribute *hisi_hha_pmu_v2_events_attr[] = {
        HISI_PMU_EVENT_ATTR(rx_ops_num, 0x00),
        HISI_PMU_EVENT_ATTR(rx_outer, 0x01),
        HISI_PMU_EVENT_ATTR(rx_sccl, 0x02),
        HISI_PMU_EVENT_ATTR(hha_retry, 0x2e),
        HISI_PMU_EVENT_ATTR(cycles, 0x55),
        NULL
};

static const struct attribute_group hisi_hha_pmu_v2_events_group = {
        .name = "events",
        .attrs = hisi_hha_pmu_v2_events_attr,
};

static DEVICE_ATTR(cpumask, 0444, hisi_cpumask_sysfs_show, NULL);

static struct attribute *hisi_hha_pmu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group hisi_hha_pmu_cpumask_attr_group = {
        .attrs = hisi_hha_pmu_cpumask_attrs,
};

static struct device_attribute hisi_hha_pmu_identifier_attr =
        __ATTR(identifier, 0444, hisi_uncore_pmu_identifier_attr_show, NULL);

static struct attribute *hisi_hha_pmu_identifier_attrs[] = {
        &hisi_hha_pmu_identifier_attr.attr,
        NULL
};

static const struct attribute_group hisi_hha_pmu_identifier_group = {
        .attrs = hisi_hha_pmu_identifier_attrs,
};

static const struct attribute_group *hisi_hha_pmu_v1_attr_groups[] = {
        &hisi_hha_pmu_v1_format_group,
        &hisi_hha_pmu_v1_events_group,
        &hisi_hha_pmu_cpumask_attr_group,
        &hisi_hha_pmu_identifier_group,
        NULL,
};

static const struct attribute_group *hisi_hha_pmu_v2_attr_groups[] = {
        &hisi_hha_pmu_v2_format_group,
        &hisi_hha_pmu_v2_events_group,
        &hisi_hha_pmu_cpumask_attr_group,
        &hisi_hha_pmu_identifier_group,
        NULL
};

static const struct hisi_uncore_ops hisi_uncore_hha_ops = {
        .write_evtype = hisi_hha_pmu_write_evtype,
        .get_event_idx = hisi_uncore_pmu_get_event_idx,
        .start_counters = hisi_hha_pmu_start_counters,
        .stop_counters = hisi_hha_pmu_stop_counters,
        .enable_counter = hisi_hha_pmu_enable_counter,
        .disable_counter = hisi_hha_pmu_disable_counter,
        .enable_counter_int = hisi_hha_pmu_enable_counter_int,
        .disable_counter_int = hisi_hha_pmu_disable_counter_int,
        .write_counter = hisi_hha_pmu_write_counter,
        .read_counter = hisi_hha_pmu_read_counter,
        .get_int_status = hisi_hha_pmu_get_int_status,
        .clear_int_status = hisi_hha_pmu_clear_int_status,
        .enable_filter = hisi_hha_pmu_enable_filter,
        .disable_filter = hisi_hha_pmu_disable_filter,
};

static int hisi_hha_pmu_dev_probe(struct platform_device *pdev,
                                  struct hisi_pmu *hha_pmu)
{
        int ret;

        ret = hisi_hha_pmu_init_data(pdev, hha_pmu);
        if (ret)
                return ret;

        ret = hisi_uncore_pmu_init_irq(hha_pmu, pdev);
        if (ret)
                return ret;

        if (hha_pmu->identifier >= HISI_PMU_V2) {
                hha_pmu->counter_bits = 64;
                hha_pmu->check_event = HHA_V2_NR_EVENT;
                hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v2_attr_groups;
                hha_pmu->num_counters = HHA_V2_NR_COUNTERS;
        } else {
                hha_pmu->counter_bits = 48;
                hha_pmu->check_event = HHA_V1_NR_EVENT;
                hha_pmu->pmu_events.attr_groups = hisi_hha_pmu_v1_attr_groups;
                hha_pmu->num_counters = HHA_V1_NR_COUNTERS;
        }
        hha_pmu->ops = &hisi_uncore_hha_ops;
        hha_pmu->dev = &pdev->dev;
        hha_pmu->on_cpu = -1;

        return 0;
}

static int hisi_hha_pmu_probe(struct platform_device *pdev)
{
        struct hisi_pmu *hha_pmu;
        char *name;
        int ret;

        hha_pmu = devm_kzalloc(&pdev->dev, sizeof(*hha_pmu), GFP_KERNEL);
        if (!hha_pmu)
                return -ENOMEM;

        platform_set_drvdata(pdev, hha_pmu);

        ret = hisi_hha_pmu_dev_probe(pdev, hha_pmu);
        if (ret)
                return ret;

        ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
                                       &hha_pmu->node);
        if (ret) {
                dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
                return ret;
        }

        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
                              hha_pmu->sccl_id, hha_pmu->index_id);
        hha_pmu->pmu = (struct pmu) {
                .name = name,
                .module = THIS_MODULE,
                .task_ctx_nr = perf_invalid_context,
                .event_init = hisi_uncore_pmu_event_init,
                .pmu_enable = hisi_uncore_pmu_enable,
                .pmu_disable = hisi_uncore_pmu_disable,
                .add = hisi_uncore_pmu_add,
                .del = hisi_uncore_pmu_del,
                .start = hisi_uncore_pmu_start,
                .stop = hisi_uncore_pmu_stop,
                .read = hisi_uncore_pmu_read,
                .attr_groups = hha_pmu->pmu_events.attr_groups,
                .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
        };

        ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
        if (ret) {
                dev_err(hha_pmu->dev, "HHA PMU register failed!\n");
                cpuhp_state_remove_instance_nocalls(
                        CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE, &hha_pmu->node);
        }

        return ret;
}

static int hisi_hha_pmu_remove(struct platform_device *pdev)
{
        struct hisi_pmu *hha_pmu = platform_get_drvdata(pdev);

        perf_pmu_unregister(&hha_pmu->pmu);
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
                                            &hha_pmu->node);
        return 0;
}

static struct platform_driver hisi_hha_pmu_driver = {
        .driver = {
                .name = "hisi_hha_pmu",
                .acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
                .suppress_bind_attrs = true,
        },
        .probe = hisi_hha_pmu_probe,
        .remove = hisi_hha_pmu_remove,
};

static int __init hisi_hha_pmu_module_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE,
                                      "AP_PERF_ARM_HISI_HHA_ONLINE",
                                      hisi_uncore_pmu_online_cpu,
                                      hisi_uncore_pmu_offline_cpu);
        if (ret) {
                pr_err("HHA PMU: Error setup hotplug, ret = %d;\n", ret);
                return ret;
        }

        ret = platform_driver_register(&hisi_hha_pmu_driver);
        if (ret)
                cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);

        return ret;
}
module_init(hisi_hha_pmu_module_init);

static void __exit hisi_hha_pmu_module_exit(void)
{
        platform_driver_unregister(&hisi_hha_pmu_driver);
        cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE);
}
module_exit(hisi_hha_pmu_module_exit);

MODULE_DESCRIPTION("HiSilicon SoC HHA uncore PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Shaokun Zhang <zhangshaokun@hisilicon.com>");
MODULE_AUTHOR("Anurup M <anurup.m@huawei.com>");
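
The registered PMU is consumed from userspace through perf, e.g. "perf stat -e hisi_sccl3_hha0/rx_ops_num/" (instance names vary per system), or directly via perf_event_open(). The sketch below is a minimal counting example against the sysfs interface this driver exports: the instance name hisi_sccl1_hha0 is a placeholder (the real name is built from the sccl_id/index_id read in hisi_hha_pmu_init_data()), rx_ops_num (config 0x00) is taken from the v1 "events" group above, and CPU 0 is arbitrary since uncore events are opened system-wide (pid = -1); sufficient perf privileges are assumed.

/* Usage sketch: count rx_ops_num on one HHA PMU instance via perf_event_open(). */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        FILE *f;
        int type, fd;

        /* Dynamic PMU type; "hisi_sccl1_hha0" is a placeholder instance name. */
        f = fopen("/sys/bus/event_source/devices/hisi_sccl1_hha0/type", "r");
        if (!f || fscanf(f, "%d", &type) != 1)
                return 1;
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.type = type;
        attr.size = sizeof(attr);
        attr.config = 0x00;             /* rx_ops_num, per the v1 "events" group */
        attr.disabled = 1;

        /* Uncore PMU: system-wide counting, so pid = -1 and an explicit CPU. */
        fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0)
                return 1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        sleep(1);
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("rx_ops_num: %llu\n", (unsigned long long)count);

        close(fd);
        return 0;
}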