pmsu.c (16733B)
1/* 2 * Power Management Service Unit(PMSU) support for Armada 370/XP platforms. 3 * 4 * Copyright (C) 2012 Marvell 5 * 6 * Yehuda Yitschak <yehuday@marvell.com> 7 * Gregory Clement <gregory.clement@free-electrons.com> 8 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 9 * 10 * This file is licensed under the terms of the GNU General Public 11 * License version 2. This program is licensed "as is" without any 12 * warranty of any kind, whether express or implied. 13 * 14 * The Armada 370 and Armada XP SOCs have a power management service 15 * unit which is responsible for powering down and waking up CPUs and 16 * other SOC units 17 */ 18 19#define pr_fmt(fmt) "mvebu-pmsu: " fmt 20 21#include <linux/clk.h> 22#include <linux/cpu_pm.h> 23#include <linux/delay.h> 24#include <linux/init.h> 25#include <linux/io.h> 26#include <linux/kernel.h> 27#include <linux/mbus.h> 28#include <linux/mvebu-pmsu.h> 29#include <linux/of_address.h> 30#include <linux/of_device.h> 31#include <linux/platform_device.h> 32#include <linux/resource.h> 33#include <linux/slab.h> 34#include <linux/smp.h> 35#include <asm/cacheflush.h> 36#include <asm/cp15.h> 37#include <asm/smp_scu.h> 38#include <asm/smp_plat.h> 39#include <asm/suspend.h> 40#include <asm/tlbflush.h> 41#include "common.h" 42#include "pmsu.h" 43 44#define PMSU_BASE_OFFSET 0x100 45#define PMSU_REG_SIZE 0x1000 46 47/* PMSU MP registers */ 48#define PMSU_CONTROL_AND_CONFIG(cpu) ((cpu * 0x100) + 0x104) 49#define PMSU_CONTROL_AND_CONFIG_DFS_REQ BIT(18) 50#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ BIT(16) 51#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN BIT(20) 52 53#define PMSU_CPU_POWER_DOWN_CONTROL(cpu) ((cpu * 0x100) + 0x108) 54 55#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP BIT(0) 56 57#define PMSU_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x10c) 58#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT BIT(16) 59#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT BIT(17) 60#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP BIT(20) 61#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP BIT(21) 62#define PMSU_STATUS_AND_MASK_DBG_WAKEUP BIT(22) 63#define PMSU_STATUS_AND_MASK_IRQ_MASK BIT(24) 64#define PMSU_STATUS_AND_MASK_FIQ_MASK BIT(25) 65 66#define PMSU_EVENT_STATUS_AND_MASK(cpu) ((cpu * 0x100) + 0x120) 67#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE BIT(1) 68#define PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK BIT(17) 69 70#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124) 71 72/* PMSU fabric registers */ 73#define L2C_NFABRIC_PM_CTL 0x4 74#define L2C_NFABRIC_PM_CTL_PWR_DOWN BIT(20) 75 76/* PMSU delay registers */ 77#define PMSU_POWERDOWN_DELAY 0xF04 78#define PMSU_POWERDOWN_DELAY_PMU BIT(1) 79#define PMSU_POWERDOWN_DELAY_MASK 0xFFFE 80#define PMSU_DFLT_ARMADA38X_DELAY 0x64 81 82/* CA9 MPcore SoC Control registers */ 83 84#define MPCORE_RESET_CTL 0x64 85#define MPCORE_RESET_CTL_L2 BIT(0) 86#define MPCORE_RESET_CTL_DEBUG BIT(16) 87 88#define SRAM_PHYS_BASE 0xFFFF0000 89#define BOOTROM_BASE 0xFFF00000 90#define BOOTROM_SIZE 0x100000 91 92#define ARMADA_370_CRYPT0_ENG_TARGET 0x9 93#define ARMADA_370_CRYPT0_ENG_ATTR 0x1 94 95extern void ll_disable_coherency(void); 96extern void ll_enable_coherency(void); 97 98extern void armada_370_xp_cpu_resume(void); 99extern void armada_38x_cpu_resume(void); 100 101static phys_addr_t pmsu_mp_phys_base; 102static void __iomem *pmsu_mp_base; 103 104static void *mvebu_cpu_resume; 105 106static const struct of_device_id of_pmsu_table[] = { 107 { .compatible = "marvell,armada-370-pmsu", }, 108 { .compatible = "marvell,armada-370-xp-pmsu", }, 109 { .compatible = "marvell,armada-380-pmsu", }, 110 { /* end of list */ }, 111}; 112 113void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr) 114{ 115 writel(__pa_symbol(boot_addr), pmsu_mp_base + 116 PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu)); 117} 118 119extern unsigned char mvebu_boot_wa_start[]; 120extern unsigned char mvebu_boot_wa_end[]; 121 122/* 123 * This function sets up the boot address workaround needed for SMP 124 * boot on Armada 375 Z1 and cpuidle on Armada 370. It unmaps the 125 * BootROM Mbus window, and instead remaps a crypto SRAM into which a 126 * custom piece of code is copied to replace the problematic BootROM. 127 */ 128int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target, 129 unsigned int crypto_eng_attribute, 130 phys_addr_t resume_addr_reg) 131{ 132 void __iomem *sram_virt_base; 133 u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start; 134 135 mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE); 136 mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute, 137 SRAM_PHYS_BASE, SZ_64K); 138 139 sram_virt_base = ioremap(SRAM_PHYS_BASE, SZ_64K); 140 if (!sram_virt_base) { 141 pr_err("Unable to map SRAM to setup the boot address WA\n"); 142 return -ENOMEM; 143 } 144 145 memcpy(sram_virt_base, &mvebu_boot_wa_start, code_len); 146 147 /* 148 * The last word of the code copied in SRAM must contain the 149 * physical base address of the PMSU register. We 150 * intentionally store this address in the native endianness 151 * of the system. 152 */ 153 __raw_writel((unsigned long)resume_addr_reg, 154 sram_virt_base + code_len - 4); 155 156 iounmap(sram_virt_base); 157 158 return 0; 159} 160 161static int __init mvebu_v7_pmsu_init(void) 162{ 163 struct device_node *np; 164 struct resource res; 165 int ret = 0; 166 167 np = of_find_matching_node(NULL, of_pmsu_table); 168 if (!np) 169 return 0; 170 171 pr_info("Initializing Power Management Service Unit\n"); 172 173 if (of_address_to_resource(np, 0, &res)) { 174 pr_err("unable to get resource\n"); 175 ret = -ENOENT; 176 goto out; 177 } 178 179 if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) { 180 pr_warn(FW_WARN "deprecated pmsu binding\n"); 181 res.start = res.start - PMSU_BASE_OFFSET; 182 res.end = res.start + PMSU_REG_SIZE - 1; 183 } 184 185 if (!request_mem_region(res.start, resource_size(&res), 186 np->full_name)) { 187 pr_err("unable to request region\n"); 188 ret = -EBUSY; 189 goto out; 190 } 191 192 pmsu_mp_phys_base = res.start; 193 194 pmsu_mp_base = ioremap(res.start, resource_size(&res)); 195 if (!pmsu_mp_base) { 196 pr_err("unable to map registers\n"); 197 release_mem_region(res.start, resource_size(&res)); 198 ret = -ENOMEM; 199 goto out; 200 } 201 202 out: 203 of_node_put(np); 204 return ret; 205} 206 207static void mvebu_v7_pmsu_enable_l2_powerdown_onidle(void) 208{ 209 u32 reg; 210 211 if (pmsu_mp_base == NULL) 212 return; 213 214 /* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */ 215 reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL); 216 reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN; 217 writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL); 218} 219 220enum pmsu_idle_prepare_flags { 221 PMSU_PREPARE_NORMAL = 0, 222 PMSU_PREPARE_DEEP_IDLE = BIT(0), 223 PMSU_PREPARE_SNOOP_DISABLE = BIT(1), 224}; 225 226/* No locking is needed because we only access per-CPU registers */ 227static int mvebu_v7_pmsu_idle_prepare(unsigned long flags) 228{ 229 unsigned int hw_cpu = cpu_logical_map(smp_processor_id()); 230 u32 reg; 231 232 if (pmsu_mp_base == NULL) 233 return -EINVAL; 234 235 /* 236 * Adjust the PMSU configuration to wait for WFI signal, enable 237 * IRQ and FIQ as wakeup events, set wait for snoop queue empty 238 * indication and mask IRQ and FIQ from CPU 239 */ 240 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 241 reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT | 242 PMSU_STATUS_AND_MASK_IRQ_WAKEUP | 243 PMSU_STATUS_AND_MASK_FIQ_WAKEUP | 244 PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT | 245 PMSU_STATUS_AND_MASK_IRQ_MASK | 246 PMSU_STATUS_AND_MASK_FIQ_MASK; 247 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 248 249 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 250 /* ask HW to power down the L2 Cache if needed */ 251 if (flags & PMSU_PREPARE_DEEP_IDLE) 252 reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN; 253 254 /* request power down */ 255 reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ; 256 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 257 258 if (flags & PMSU_PREPARE_SNOOP_DISABLE) { 259 /* Disable snoop disable by HW - SW is taking care of it */ 260 reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu)); 261 reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP; 262 writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu)); 263 } 264 265 return 0; 266} 267 268int armada_370_xp_pmsu_idle_enter(unsigned long deepidle) 269{ 270 unsigned long flags = PMSU_PREPARE_SNOOP_DISABLE; 271 int ret; 272 273 if (deepidle) 274 flags |= PMSU_PREPARE_DEEP_IDLE; 275 276 ret = mvebu_v7_pmsu_idle_prepare(flags); 277 if (ret) 278 return ret; 279 280 v7_exit_coherency_flush(all); 281 282 ll_disable_coherency(); 283 284 dsb(); 285 286 wfi(); 287 288 /* If we are here, wfi failed. As processors run out of 289 * coherency for some time, tlbs might be stale, so flush them 290 */ 291 local_flush_tlb_all(); 292 293 ll_enable_coherency(); 294 295 /* Test the CR_C bit and set it if it was cleared */ 296 asm volatile( 297 "mrc p15, 0, r0, c1, c0, 0 \n\t" 298 "tst r0, %0 \n\t" 299 "orreq r0, r0, #(1 << 2) \n\t" 300 "mcreq p15, 0, r0, c1, c0, 0 \n\t" 301 "isb " 302 : : "Ir" (CR_C) : "r0"); 303 304 pr_debug("Failed to suspend the system\n"); 305 306 return 0; 307} 308 309static int armada_370_xp_cpu_suspend(unsigned long deepidle) 310{ 311 return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter); 312} 313 314int armada_38x_do_cpu_suspend(unsigned long deepidle) 315{ 316 unsigned long flags = 0; 317 318 if (deepidle) 319 flags |= PMSU_PREPARE_DEEP_IDLE; 320 321 mvebu_v7_pmsu_idle_prepare(flags); 322 /* 323 * Already flushed cache, but do it again as the outer cache 324 * functions dirty the cache with spinlocks 325 */ 326 v7_exit_coherency_flush(louis); 327 328 scu_power_mode(mvebu_get_scu_base(), SCU_PM_POWEROFF); 329 330 cpu_do_idle(); 331 332 return 1; 333} 334 335static int armada_38x_cpu_suspend(unsigned long deepidle) 336{ 337 return cpu_suspend(false, armada_38x_do_cpu_suspend); 338} 339 340/* No locking is needed because we only access per-CPU registers */ 341void mvebu_v7_pmsu_idle_exit(void) 342{ 343 unsigned int hw_cpu = cpu_logical_map(smp_processor_id()); 344 u32 reg; 345 346 if (pmsu_mp_base == NULL) 347 return; 348 /* cancel ask HW to power down the L2 Cache if possible */ 349 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 350 reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN; 351 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu)); 352 353 /* cancel Enable wakeup events and mask interrupts */ 354 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 355 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP); 356 reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT; 357 reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT; 358 reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK); 359 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu)); 360} 361 362static int mvebu_v7_cpu_pm_notify(struct notifier_block *self, 363 unsigned long action, void *hcpu) 364{ 365 if (action == CPU_PM_ENTER) { 366 unsigned int hw_cpu = cpu_logical_map(smp_processor_id()); 367 mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cpu_resume); 368 } else if (action == CPU_PM_EXIT) { 369 mvebu_v7_pmsu_idle_exit(); 370 } 371 372 return NOTIFY_OK; 373} 374 375static struct notifier_block mvebu_v7_cpu_pm_notifier = { 376 .notifier_call = mvebu_v7_cpu_pm_notify, 377}; 378 379static struct platform_device mvebu_v7_cpuidle_device; 380 381static int broken_idle(struct device_node *np) 382{ 383 if (of_property_read_bool(np, "broken-idle")) { 384 pr_warn("CPU idle is currently broken: disabling\n"); 385 return 1; 386 } 387 388 return 0; 389} 390 391static __init int armada_370_cpuidle_init(void) 392{ 393 struct device_node *np; 394 phys_addr_t redirect_reg; 395 396 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"); 397 if (!np) 398 return -ENODEV; 399 400 if (broken_idle(np)) 401 goto end; 402 403 /* 404 * On Armada 370, there is "a slow exit process from the deep 405 * idle state due to heavy L1/L2 cache cleanup operations 406 * performed by the BootROM software". To avoid this, we 407 * replace the restart code of the bootrom by a a simple jump 408 * to the boot address. Then the code located at this boot 409 * address will take care of the initialization. 410 */ 411 redirect_reg = pmsu_mp_phys_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(0); 412 mvebu_setup_boot_addr_wa(ARMADA_370_CRYPT0_ENG_TARGET, 413 ARMADA_370_CRYPT0_ENG_ATTR, 414 redirect_reg); 415 416 mvebu_cpu_resume = armada_370_xp_cpu_resume; 417 mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend; 418 mvebu_v7_cpuidle_device.name = "cpuidle-armada-370"; 419 420end: 421 of_node_put(np); 422 return 0; 423} 424 425static __init int armada_38x_cpuidle_init(void) 426{ 427 struct device_node *np; 428 void __iomem *mpsoc_base; 429 u32 reg; 430 431 pr_warn("CPU idle is currently broken on Armada 38x: disabling\n"); 432 return 0; 433 434 np = of_find_compatible_node(NULL, NULL, 435 "marvell,armada-380-coherency-fabric"); 436 if (!np) 437 return -ENODEV; 438 439 if (broken_idle(np)) 440 goto end; 441 442 of_node_put(np); 443 444 np = of_find_compatible_node(NULL, NULL, 445 "marvell,armada-380-mpcore-soc-ctrl"); 446 if (!np) 447 return -ENODEV; 448 mpsoc_base = of_iomap(np, 0); 449 BUG_ON(!mpsoc_base); 450 451 /* Set up reset mask when powering down the cpus */ 452 reg = readl(mpsoc_base + MPCORE_RESET_CTL); 453 reg |= MPCORE_RESET_CTL_L2; 454 reg |= MPCORE_RESET_CTL_DEBUG; 455 writel(reg, mpsoc_base + MPCORE_RESET_CTL); 456 iounmap(mpsoc_base); 457 458 /* Set up delay */ 459 reg = readl(pmsu_mp_base + PMSU_POWERDOWN_DELAY); 460 reg &= ~PMSU_POWERDOWN_DELAY_MASK; 461 reg |= PMSU_DFLT_ARMADA38X_DELAY; 462 reg |= PMSU_POWERDOWN_DELAY_PMU; 463 writel(reg, pmsu_mp_base + PMSU_POWERDOWN_DELAY); 464 465 mvebu_cpu_resume = armada_38x_cpu_resume; 466 mvebu_v7_cpuidle_device.dev.platform_data = armada_38x_cpu_suspend; 467 mvebu_v7_cpuidle_device.name = "cpuidle-armada-38x"; 468 469end: 470 of_node_put(np); 471 return 0; 472} 473 474static __init int armada_xp_cpuidle_init(void) 475{ 476 struct device_node *np; 477 478 np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"); 479 if (!np) 480 return -ENODEV; 481 482 if (broken_idle(np)) 483 goto end; 484 485 mvebu_cpu_resume = armada_370_xp_cpu_resume; 486 mvebu_v7_cpuidle_device.dev.platform_data = armada_370_xp_cpu_suspend; 487 mvebu_v7_cpuidle_device.name = "cpuidle-armada-xp"; 488 489end: 490 of_node_put(np); 491 return 0; 492} 493 494static int __init mvebu_v7_cpu_pm_init(void) 495{ 496 struct device_node *np; 497 int ret; 498 499 np = of_find_matching_node(NULL, of_pmsu_table); 500 if (!np) 501 return 0; 502 of_node_put(np); 503 504 /* 505 * Currently the CPU idle support for Armada 38x is broken, as 506 * the CPU hotplug uses some of the CPU idle functions it is 507 * broken too, so let's disable it 508 */ 509 if (of_machine_is_compatible("marvell,armada380")) { 510 cpu_hotplug_disable(); 511 pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling\n"); 512 } 513 514 if (of_machine_is_compatible("marvell,armadaxp")) 515 ret = armada_xp_cpuidle_init(); 516 else if (of_machine_is_compatible("marvell,armada370")) 517 ret = armada_370_cpuidle_init(); 518 else if (of_machine_is_compatible("marvell,armada380")) 519 ret = armada_38x_cpuidle_init(); 520 else 521 return 0; 522 523 if (ret) 524 return ret; 525 526 mvebu_v7_pmsu_enable_l2_powerdown_onidle(); 527 if (mvebu_v7_cpuidle_device.name) 528 platform_device_register(&mvebu_v7_cpuidle_device); 529 cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier); 530 531 return 0; 532} 533 534arch_initcall(mvebu_v7_cpu_pm_init); 535early_initcall(mvebu_v7_pmsu_init); 536 537static void mvebu_pmsu_dfs_request_local(void *data) 538{ 539 u32 reg; 540 u32 cpu = smp_processor_id(); 541 unsigned long flags; 542 543 local_irq_save(flags); 544 545 /* Prepare to enter idle */ 546 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 547 reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT | 548 PMSU_STATUS_AND_MASK_IRQ_MASK | 549 PMSU_STATUS_AND_MASK_FIQ_MASK; 550 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 551 552 /* Request the DFS transition */ 553 reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu)); 554 reg |= PMSU_CONTROL_AND_CONFIG_DFS_REQ; 555 writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(cpu)); 556 557 /* The fact of entering idle will trigger the DFS transition */ 558 wfi(); 559 560 /* 561 * We're back from idle, the DFS transition has completed, 562 * clear the idle wait indication. 563 */ 564 reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 565 reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT; 566 writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(cpu)); 567 568 local_irq_restore(flags); 569} 570 571int mvebu_pmsu_dfs_request(int cpu) 572{ 573 unsigned long timeout; 574 int hwcpu = cpu_logical_map(cpu); 575 u32 reg; 576 577 /* Clear any previous DFS DONE event */ 578 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 579 reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE; 580 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 581 582 /* Mask the DFS done interrupt, since we are going to poll */ 583 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 584 reg |= PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK; 585 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 586 587 /* Trigger the DFS on the appropriate CPU */ 588 smp_call_function_single(cpu, mvebu_pmsu_dfs_request_local, 589 NULL, false); 590 591 /* Poll until the DFS done event is generated */ 592 timeout = jiffies + HZ; 593 while (time_before(jiffies, timeout)) { 594 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 595 if (reg & PMSU_EVENT_STATUS_AND_MASK_DFS_DONE) 596 break; 597 udelay(10); 598 } 599 600 if (time_after(jiffies, timeout)) 601 return -ETIME; 602 603 /* Restore the DFS mask to its original state */ 604 reg = readl(pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 605 reg &= ~PMSU_EVENT_STATUS_AND_MASK_DFS_DONE_MASK; 606 writel(reg, pmsu_mp_base + PMSU_EVENT_STATUS_AND_MASK(hwcpu)); 607 608 return 0; 609}