cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

processor_thermal.c (6069B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <linux/uaccess.h>

#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */
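
/*
 * Why cubic: dynamic CMOS power scales roughly as P = C * V^2 * f, and the
 * supply voltage a given frequency requires scales roughly with the
 * frequency itself, so reducing f (and V along with it) cuts power roughly
 * as f^3. This is background reasoning only; the code below does not
 * depend on it.
 */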

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3
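
/*
 * Each step removes a further 20% of cpuinfo.max_freq (see the max_freq
 * computation in cpufreq_set_cur_state()): step 0 allows 100% of the
 * maximum frequency, step 1 allows 80%, step 2 allows 60% and step 3
 * allows 40%.
 */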
     30
     31static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
     32
     33#define reduction_pctg(cpu) \
     34	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
     35
     36/*
     37 * Emulate "per package data" using per cpu data (which should really be
     38 * provided elsewhere)
     39 *
     40 * Note we can lose a CPU on cpu hotunplug, in this case we forget the state
     41 * temporarily. Fortunately that's not a big issue here (I hope)
     42 */
     43static int phys_package_first_cpu(int cpu)
     44{
     45	int i;
     46	int id = topology_physical_package_id(cpu);
     47
     48	for_each_online_cpu(i)
     49		if (topology_physical_package_id(i) == id)
     50			return i;
     51	return 0;
     52}
     53
     54static int cpu_has_cpufreq(unsigned int cpu)
     55{
     56	struct cpufreq_policy *policy;
     57
     58	if (!acpi_processor_cpufreq_init)
     59		return 0;
     60
     61	policy = cpufreq_cpu_get(cpu);
     62	if (policy) {
     63		cpufreq_cpu_put(policy);
     64		return 1;
     65	}
     66	return 0;
     67}
     68
     69static int cpufreq_get_max_state(unsigned int cpu)
     70{
     71	if (!cpu_has_cpufreq(cpu))
     72		return 0;
     73
     74	return CPUFREQ_THERMAL_MAX_STEP;
     75}
     76
     77static int cpufreq_get_cur_state(unsigned int cpu)
     78{
     79	if (!cpu_has_cpufreq(cpu))
     80		return 0;
     81
     82	return reduction_pctg(cpu);
     83}
     84
     85static int cpufreq_set_cur_state(unsigned int cpu, int state)
     86{
     87	struct cpufreq_policy *policy;
     88	struct acpi_processor *pr;
     89	unsigned long max_freq;
     90	int i, ret;
     91
     92	if (!cpu_has_cpufreq(cpu))
     93		return 0;
     94
     95	reduction_pctg(cpu) = state;
     96
     97	/*
     98	 * Update all the CPUs in the same package because they all
     99	 * contribute to the temperature and often share the same
    100	 * frequency.
    101	 */
    102	for_each_online_cpu(i) {
    103		if (topology_physical_package_id(i) !=
    104		    topology_physical_package_id(cpu))
    105			continue;
    106
    107		pr = per_cpu(processors, i);
    108
    109		if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
    110			continue;
    111
    112		policy = cpufreq_cpu_get(i);
    113		if (!policy)
    114			return -EINVAL;
    115
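		/*
		 * Worked example: with cpuinfo.max_freq = 3000000 kHz and
		 * state 2, max_freq = 3000000 * (100 - 2 * 20) / 100
		 * = 1800000 kHz.
		 */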
		max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100;

		cpufreq_cpu_put(policy);

		ret = freq_qos_update_request(&pr->thermal_req, max_freq);
		if (ret < 0) {
			pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
				pr->id, ret);
		}
	}
	return 0;
}

void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);
		int ret;

		if (!pr)
			continue;

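		/*
		 * Add a max-frequency QoS request that starts out at
		 * INT_MAX, i.e. it imposes no limit until
		 * cpufreq_set_cur_state() lowers it.
		 */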
		ret = freq_qos_add_request(&policy->constraints,
					   &pr->thermal_req,
					   FREQ_QOS_MAX, INT_MAX);
		if (ret < 0)
			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
			       cpu, ret);
	}
}

void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			freq_qos_remove_request(&pr->thermal_req);
	}
}
#else				/* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}

#endif

/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
	int max_state = 0;

	/*
	 * There exist four cpufreq thermal states according to
	 * cpufreq_thermal_reduction_pctg: 0, 1, 2 and 3.
	 */
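	/*
	 * Example: with the 3 cpufreq steps and, say, 8 T-states this
	 * yields max_state = 3 + (8 - 1) = 10.
	 */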
	max_state += cpufreq_get_max_state(pr->id);
	if (pr->flags.throttling)
		max_state += (pr->throttling.state_count - 1);

	return max_state;
}

static int
processor_get_max_state(struct thermal_cooling_device *cdev,
			unsigned long *state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*state = acpi_processor_max_state(pr);
	return 0;
}

static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
			unsigned long *cur_state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	*cur_state = cpufreq_get_cur_state(pr->id);
	if (pr->flags.throttling)
		*cur_state += pr->throttling.state;
	return 0;
}

static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
			unsigned long state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr;
	int result = 0;
	int max_pstate;

	if (!device)
		return -EINVAL;

	pr = acpi_driver_data(device);
	if (!pr)
		return -EINVAL;

	max_pstate = cpufreq_get_max_state(pr->id);

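	/*
	 * States 0..max_pstate are handled by cpufreq frequency limiting;
	 * anything above that is mapped onto ACPI T-state throttling.
	 */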
	if (state > acpi_processor_max_state(pr))
		return -EINVAL;

	if (state <= max_pstate) {
		if (pr->flags.throttling && pr->throttling.state)
			result = acpi_processor_set_throttling(pr, 0, false);
		cpufreq_set_cur_state(pr->id, state);
	} else {
		cpufreq_set_cur_state(pr->id, max_pstate);
		result = acpi_processor_set_throttling(pr,
				state - max_pstate, false);
	}
	return result;
}

const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};
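
/*
 * Usage sketch (an assumption based on the wider ACPI processor driver,
 * not code from this file): processor_cooling_ops is exposed to the
 * thermal core by registering a cooling device, roughly:
 *
 *	struct thermal_cooling_device *cdev;
 *
 *	cdev = thermal_cooling_device_register("Processor", device,
 *					       &processor_cooling_ops);
 *	if (IS_ERR(cdev))
 *		return PTR_ERR(cdev);
 *
 * The thermal core then calls set_cur_state() as the zone temperature
 * crosses its passive trip point.
 */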