cpufreq_cooling.c (19865B)
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/thermal/cpufreq_cooling.c
 *
 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 *
 * Copyright (C) 2012-2018 Linaro Limited.
 *
 * Authors: Amit Daniel <amit.kachhap@linaro.org>
 *          Viresh Kumar <viresh.kumar@linaro.org>
 *
 */
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/device.h>
#include <linux/energy_model.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include <trace/events/thermal.h>

/*
 * Cooling state <-> CPUFreq frequency
 *
 * Cooling states are translated to frequencies throughout this driver and this
 * is the relation between them.
 *
 * Highest cooling state corresponds to lowest possible frequency.
 *
 * i.e.
 *      level 0 --> 1st Max Freq
 *      level 1 --> 2nd Max Freq
 *      ...
 */
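
/*
 * A minimal worked example of that mapping, assuming (for illustration only)
 * a policy whose energy model exposes four OPPs at 500000, 1000000, 1500000
 * and 2000000 kHz: max_level is 3 and the cooling states map as
 *
 *      state 0 --> 2000000 kHz (no throttling)
 *      state 1 --> 1500000 kHz
 *      state 2 --> 1000000 kHz
 *      state 3 -->  500000 kHz (deepest throttling)
 *
 * which is what get_state_freq() below computes via
 * em->table[max_level - state].frequency when the Energy Model is available.
 */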

/**
 * struct time_in_idle - Idle time stats
 * @time: previous reading of the absolute time that this cpu was idle
 * @timestamp: wall time of the last invocation of get_cpu_idle_time_us()
 */
struct time_in_idle {
        u64 time;
        u64 timestamp;
};

/**
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @last_load: load measured by the latest call to cpufreq_get_requested_power()
 * @cpufreq_state: integer value representing the current state of cpufreq
 *      cooling devices.
 * @max_level: maximum cooling level. One less than total number of valid
 *      cpufreq frequencies.
 * @em: Reference on the Energy Model of the device
 * @policy: cpufreq policy.
 * @idle_time: idle time stats
 * @qos_req: PM QoS constraint to apply
 *
 * This structure is required for keeping information of each registered
 * cpufreq_cooling_device.
 */
struct cpufreq_cooling_device {
        u32 last_load;
        unsigned int cpufreq_state;
        unsigned int max_level;
        struct em_perf_domain *em;
        struct cpufreq_policy *policy;
#ifndef CONFIG_SMP
        struct time_in_idle *idle_time;
#endif
        struct freq_qos_request qos_req;
};

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
/**
 * get_level: Find the level for a particular frequency
 * @cpufreq_cdev: cpufreq_cdev for which the property is required
 * @freq: Frequency
 *
 * Return: level corresponding to the frequency.
 */
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
                               unsigned int freq)
{
        int i;

        for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
                if (freq > cpufreq_cdev->em->table[i].frequency)
                        break;
        }

        return cpufreq_cdev->max_level - i - 1;
}

static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
                             u32 freq)
{
        int i;

        for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
                if (freq > cpufreq_cdev->em->table[i].frequency)
                        break;
        }

        return cpufreq_cdev->em->table[i + 1].power;
}

static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
                             u32 power)
{
        int i;

        for (i = cpufreq_cdev->max_level; i > 0; i--) {
                if (power >= cpufreq_cdev->em->table[i].power)
                        break;
        }

        return cpufreq_cdev->em->table[i].frequency;
}

/**
 * get_load() - get load for a cpu
 * @cpufreq_cdev: struct cpufreq_cooling_device for the cpu
 * @cpu: cpu number
 * @cpu_idx: index of the cpu in time_in_idle array
 *
 * Return: The average load of cpu @cpu in percentage since this
 * function was last called.
 */
#ifdef CONFIG_SMP
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
                    int cpu_idx)
{
        unsigned long max = arch_scale_cpu_capacity(cpu);
        unsigned long util;

        util = sched_cpu_util(cpu, max);
        return (util * 100) / max;
}
#else /* !CONFIG_SMP */
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
                    int cpu_idx)
{
        u32 load;
        u64 now, now_idle, delta_time, delta_idle;
        struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx];

        now_idle = get_cpu_idle_time(cpu, &now, 0);
        delta_idle = now_idle - idle_time->time;
        delta_time = now - idle_time->timestamp;

        if (delta_time <= delta_idle)
                load = 0;
        else
                load = div64_u64(100 * (delta_time - delta_idle), delta_time);

        idle_time->time = now_idle;
        idle_time->timestamp = now;

        return load;
}
#endif /* CONFIG_SMP */

/**
 * get_dynamic_power() - calculate the dynamic power
 * @cpufreq_cdev: &cpufreq_cooling_device for this cdev
 * @freq: current frequency
 *
 * Return: the dynamic power consumed by the cpus described by
 * @cpufreq_cdev.
 */
static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
                             unsigned long freq)
{
        u32 raw_cpu_power;

        raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
        return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
}
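
/*
 * Worked example for get_dynamic_power(), continuing the illustrative
 * four-OPP table above and assuming the EM reports 250 mW per CPU at the
 * current OPP: for a policy spanning four CPUs whose last measured loads
 * were 80%, 60%, 40% and 20%, last_load = 200 and the estimate is
 *
 *      (250 mW * 200) / 100 = 500 mW
 *
 * i.e. the per-CPU EM power scaled by the summed load of all CPUs in the
 * policy. The numbers are illustrative only.
 */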

/**
 * cpufreq_get_requested_power() - get the current power
 * @cdev: &thermal_cooling_device pointer
 * @power: pointer in which to store the resulting power
 *
 * Calculate the current power consumption of the cpus in milliwatts
 * and store it in @power. This function should actually calculate
 * the requested power, but it's hard to get the frequency that
 * cpufreq would have assigned if there were no thermal limits.
 * Instead, we calculate the current power on the assumption that the
 * immediate future will look like the immediate past.
 *
 * We use the current frequency and the average load since this
 * function was last called. In reality, there could have been
 * multiple opps since this function was last called and that affects
 * the load calculation. While it's not perfectly accurate, this
 * simplification is good enough and works. REVISIT this, as more
 * complex code may be needed if experiments show that it's not
 * accurate enough.
 *
 * Return: 0 on success, -E* if getting the static power failed.
 */
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
                                       u32 *power)
{
        unsigned long freq;
        int i = 0, cpu;
        u32 total_load = 0;
        struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
        struct cpufreq_policy *policy = cpufreq_cdev->policy;
        u32 *load_cpu = NULL;

        freq = cpufreq_quick_get(policy->cpu);

        if (trace_thermal_power_cpu_get_power_enabled()) {
                u32 ncpus = cpumask_weight(policy->related_cpus);

                load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
        }

        for_each_cpu(cpu, policy->related_cpus) {
                u32 load;

                if (cpu_online(cpu))
                        load = get_load(cpufreq_cdev, cpu, i);
                else
                        load = 0;

                total_load += load;
                if (load_cpu)
                        load_cpu[i] = load;

                i++;
        }

        cpufreq_cdev->last_load = total_load;

        *power = get_dynamic_power(cpufreq_cdev, freq);

        if (load_cpu) {
                trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
                                                  load_cpu, i, *power);

                kfree(load_cpu);
        }

        return 0;
}

/**
 * cpufreq_state2power() - convert a cpu cdev state to power consumed
 * @cdev: &thermal_cooling_device pointer
 * @state: cooling device state to be converted
 * @power: pointer in which to store the resulting power
 *
 * Convert cooling device state @state into power consumption in
 * milliwatts assuming 100% load. Store the calculated power in
 * @power.
 *
 * Return: 0 on success, -EINVAL if the cooling device state could not
 * be converted into a frequency or other -E* if there was an error
 * when calculating the static power.
 */
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
                               unsigned long state, u32 *power)
{
        unsigned int freq, num_cpus, idx;
        struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

        /* Requested state should not exceed max_level */
        if (state > cpufreq_cdev->max_level)
                return -EINVAL;

        num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);

        idx = cpufreq_cdev->max_level - state;
        freq = cpufreq_cdev->em->table[idx].frequency;
        *power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;

        return 0;
}

/**
 * cpufreq_power2state() - convert power to a cooling device state
 * @cdev: &thermal_cooling_device pointer
 * @power: power in milliwatts to be converted
 * @state: pointer in which to store the resulting state
 *
 * Calculate a cooling device state for the cpus described by @cdev
 * that would allow them to consume at most @power mW and store it in
 * @state. Note that this calculation depends on external factors
 * such as the cpu load or the current static power. Calling this
 * function with the same power as input can yield different cooling
 * device states depending on those external factors.
 *
 * Return: 0 on success, -ENODEV if no cpus are online or -EINVAL if
 * the calculated frequency could not be converted to a valid state.
 * The latter should not happen unless the frequencies available to
 * cpufreq have changed since the initialization of the cpu cooling
 * device.
 */
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
                               u32 power, unsigned long *state)
{
        unsigned int target_freq;
        u32 last_load, normalised_power;
        struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
        struct cpufreq_policy *policy = cpufreq_cdev->policy;

        last_load = cpufreq_cdev->last_load ?: 1;
        normalised_power = (power * 100) / last_load;
        target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);

        *state = get_level(cpufreq_cdev, target_freq);
        trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
                                      power);
        return 0;
}
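
/*
 * Worked example for cpufreq_power2state(), using the same illustrative EM
 * table and assuming per-CPU powers of {100, 250, 450, 700} mW for the four
 * OPPs and last_load = 200: a granted budget of 600 mW gives
 *
 *      normalised_power = (600 * 100) / 200 = 300 mW
 *
 * cpu_power_to_freq() picks the highest OPP whose per-CPU power does not
 * exceed 300 mW, i.e. 1000000 kHz (250 mW), and get_level() turns that
 * frequency into cooling state 2. The numbers are illustrative only.
 */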

static inline bool em_is_sane(struct cpufreq_cooling_device *cpufreq_cdev,
                              struct em_perf_domain *em)
{
        struct cpufreq_policy *policy;
        unsigned int nr_levels;

        if (!em || em_is_artificial(em))
                return false;

        policy = cpufreq_cdev->policy;
        if (!cpumask_equal(policy->related_cpus, em_span_cpus(em))) {
                pr_err("The span of pd %*pbl is misaligned with cpufreq policy %*pbl\n",
                       cpumask_pr_args(em_span_cpus(em)),
                       cpumask_pr_args(policy->related_cpus));
                return false;
        }

        nr_levels = cpufreq_cdev->max_level + 1;
        if (em_pd_nr_perf_states(em) != nr_levels) {
                pr_err("The number of performance states in pd %*pbl (%u) doesn't match the number of cooling levels (%u)\n",
                       cpumask_pr_args(em_span_cpus(em)),
                       em_pd_nr_perf_states(em), nr_levels);
                return false;
        }

        return true;
}
#endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */

#ifdef CONFIG_SMP
static inline int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
        return 0;
}

static inline void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
}
#else
static int allocate_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
        unsigned int num_cpus = cpumask_weight(cpufreq_cdev->policy->related_cpus);

        cpufreq_cdev->idle_time = kcalloc(num_cpus,
                                          sizeof(*cpufreq_cdev->idle_time),
                                          GFP_KERNEL);
        if (!cpufreq_cdev->idle_time)
                return -ENOMEM;

        return 0;
}

static void free_idle_time(struct cpufreq_cooling_device *cpufreq_cdev)
{
        kfree(cpufreq_cdev->idle_time);
        cpufreq_cdev->idle_time = NULL;
}
#endif /* CONFIG_SMP */

static unsigned int get_state_freq(struct cpufreq_cooling_device *cpufreq_cdev,
                                   unsigned long state)
{
        struct cpufreq_policy *policy;
        unsigned long idx;

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
        /* Use the Energy Model table if available */
        if (cpufreq_cdev->em) {
                idx = cpufreq_cdev->max_level - state;
                return cpufreq_cdev->em->table[idx].frequency;
        }
#endif

        /* Otherwise, fallback on the CPUFreq table */
        policy = cpufreq_cdev->policy;
        if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
                idx = cpufreq_cdev->max_level - state;
        else
                idx = state;

        return policy->freq_table[idx].frequency;
}

/* cpufreq cooling device callback functions are defined below */

/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * max cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
                                 unsigned long *state)
{
        struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

        *state = cpufreq_cdev->max_level;
        return 0;
}

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
                                 unsigned long *state)
{
        struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

        *state = cpufreq_cdev->cpufreq_state;

        return 0;
}

/**
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: set this variable to the current cooling state.
 *
 * Callback for the thermal cooling device to change the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
                                 unsigned long state)
{
        struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
        struct cpumask *cpus;
        unsigned int frequency;
        int ret;

        /* Requested state should not exceed max_level */
        if (state > cpufreq_cdev->max_level)
                return -EINVAL;

        /* Check if the old cooling action is same as new cooling action */
        if (cpufreq_cdev->cpufreq_state == state)
                return 0;

        frequency = get_state_freq(cpufreq_cdev, state);

        ret = freq_qos_update_request(&cpufreq_cdev->qos_req, frequency);
        if (ret >= 0) {
                cpufreq_cdev->cpufreq_state = state;
                cpus = cpufreq_cdev->policy->related_cpus;
                arch_update_thermal_pressure(cpus, frequency);
                ret = 0;
        }

        return ret;
}
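
/*
 * Illustration of the effect of cpufreq_set_cur_state(): with the example
 * table above, a governor requesting state 2 makes get_state_freq() return
 * 1000000 kHz, so the FREQ_QOS_MAX request caps the policy at that OPP and
 * arch_update_thermal_pressure() tells the scheduler that the capacity of
 * the policy's CPUs is reduced accordingly.
 */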

/* Bind cpufreq callbacks to thermal cooling device ops */

static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
        .get_max_state = cpufreq_get_max_state,
        .get_cur_state = cpufreq_get_cur_state,
        .set_cur_state = cpufreq_set_cur_state,
};

/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device device tree node
 * @policy: cpufreq policy
 * @em: Energy Model of the cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "cpufreq-%s", where %s is the name of the policy's CPU device (e.g.
 * "cpufreq-cpu0"). This API can support multiple instances of cpufreq cooling
 * devices. It also gives the opportunity to link the cooling device with a
 * device tree node, in order to bind it via the thermal DT code.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
                           struct cpufreq_policy *policy,
                           struct em_perf_domain *em)
{
        struct thermal_cooling_device *cdev;
        struct cpufreq_cooling_device *cpufreq_cdev;
        unsigned int i;
        struct device *dev;
        int ret;
        struct thermal_cooling_device_ops *cooling_ops;
        char *name;

        if (IS_ERR_OR_NULL(policy)) {
                pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
                return ERR_PTR(-EINVAL);
        }

        dev = get_cpu_device(policy->cpu);
        if (unlikely(!dev)) {
                pr_warn("No cpu device for cpu %d\n", policy->cpu);
                return ERR_PTR(-ENODEV);
        }

        i = cpufreq_table_count_valid_entries(policy);
        if (!i) {
                pr_debug("%s: CPUFreq table not found or has no valid entries\n",
                         __func__);
                return ERR_PTR(-ENODEV);
        }

        cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
        if (!cpufreq_cdev)
                return ERR_PTR(-ENOMEM);

        cpufreq_cdev->policy = policy;

        ret = allocate_idle_time(cpufreq_cdev);
        if (ret) {
                cdev = ERR_PTR(ret);
                goto free_cdev;
        }

        /* max_level is an index, not a counter */
        cpufreq_cdev->max_level = i - 1;

        cooling_ops = &cpufreq_cooling_ops;

#ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR
        if (em_is_sane(cpufreq_cdev, em)) {
                cpufreq_cdev->em = em;
                cooling_ops->get_requested_power = cpufreq_get_requested_power;
                cooling_ops->state2power = cpufreq_state2power;
                cooling_ops->power2state = cpufreq_power2state;
        } else
#endif
        if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED) {
                pr_err("%s: unsorted frequency tables are not supported\n",
                       __func__);
                cdev = ERR_PTR(-EINVAL);
                goto free_idle_time;
        }

        ret = freq_qos_add_request(&policy->constraints,
                                   &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
                                   get_state_freq(cpufreq_cdev, 0));
        if (ret < 0) {
                pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
                       ret);
                cdev = ERR_PTR(ret);
                goto free_idle_time;
        }

        cdev = ERR_PTR(-ENOMEM);
        name = kasprintf(GFP_KERNEL, "cpufreq-%s", dev_name(dev));
        if (!name)
                goto remove_qos_req;

        cdev = thermal_of_cooling_device_register(np, name, cpufreq_cdev,
                                                  cooling_ops);
        kfree(name);

        if (IS_ERR(cdev))
                goto remove_qos_req;

        return cdev;

remove_qos_req:
        freq_qos_remove_request(&cpufreq_cdev->qos_req);
free_idle_time:
        free_idle_time(cpufreq_cdev);
free_cdev:
        kfree(cpufreq_cdev);
        return cdev;
}

/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "cpufreq-%s". This API can support multiple instances of cpufreq
 * cooling devices.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
        return __cpufreq_cooling_register(NULL, policy, NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);
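
/*
 * Example use of cpufreq_cooling_register() (a minimal sketch, not taken
 * from an in-tree driver; the "foo_*" names are hypothetical): a cpufreq
 * driver that manages its own cooling device can register it once the
 * policy is fully set up, e.g. from its ->ready() callback, and must
 * unregister it when the policy goes away:
 *
 *      static struct thermal_cooling_device *foo_cdev;
 *
 *      static void foo_cpufreq_ready(struct cpufreq_policy *policy)
 *      {
 *              foo_cdev = cpufreq_cooling_register(policy);
 *              if (IS_ERR(foo_cdev))
 *                      foo_cdev = NULL;
 *      }
 *
 *      static int foo_cpufreq_exit(struct cpufreq_policy *policy)
 *      {
 *              cpufreq_cooling_unregister(foo_cdev);
 *              return 0;
 *      }
 */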

/**
 * of_cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "cpufreq-%s". This API can support multiple instances of cpufreq
 * cooling devices. Using this API, the cpufreq cooling device will be
 * linked to the device tree node provided.
 *
 * Using this function, the cooling device will implement the power
 * extensions by using a simple cpu power model. The cpus must have
 * registered their OPPs using the OPP library.
 *
 * It also takes into account, if the property is present in the policy's CPU
 * node, the static power consumed by the cpu.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * and NULL on failure.
 */
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
        struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
        struct thermal_cooling_device *cdev = NULL;

        if (!np) {
                pr_err("cpufreq_cooling: OF node not available for cpu%d\n",
                       policy->cpu);
                return NULL;
        }

        if (of_find_property(np, "#cooling-cells", NULL)) {
                struct em_perf_domain *em = em_cpu_get(policy->cpu);

                cdev = __cpufreq_cooling_register(np, policy, em);
                if (IS_ERR(cdev)) {
                        pr_err("cpufreq_cooling: cpu%d failed to register as cooling device: %ld\n",
                               policy->cpu, PTR_ERR(cdev));
                        cdev = NULL;
                }
        }

        of_node_put(np);
        return cdev;
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);

/**
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 *
 * This interface function unregisters the "cpufreq-%s" cooling device.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
        struct cpufreq_cooling_device *cpufreq_cdev;

        if (!cdev)
                return;

        cpufreq_cdev = cdev->devdata;

        thermal_cooling_device_unregister(cdev);
        freq_qos_remove_request(&cpufreq_cdev->qos_req);
        free_idle_time(cpufreq_cdev);
        kfree(cpufreq_cdev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
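
/*
 * Note: most cpufreq drivers do not call these helpers directly. A driver
 * that sets CPUFREQ_IS_COOLING_DEV in its ->flags lets the cpufreq core
 * register the cooling device itself via of_cpufreq_cooling_register() when
 * the policy comes online, and unregister it again on the way down, provided
 * CONFIG_CPU_THERMAL is enabled. A minimal sketch with a hypothetical driver
 * (the "foo_*" callbacks are assumed to exist elsewhere):
 *
 *      static struct cpufreq_driver foo_cpufreq_driver = {
 *              .name           = "foo-cpufreq",
 *              .flags          = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
 *                                CPUFREQ_IS_COOLING_DEV,
 *              .init           = foo_cpufreq_init,
 *              .exit           = foo_cpufreq_exit,
 *              .target_index   = foo_cpufreq_target_index,
 *              .get            = cpufreq_generic_get,
 *              .attr           = cpufreq_generic_attr,
 *      };
 */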