cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

energy_model.h (10972B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ENERGY_MODEL_H
#define _LINUX_ENERGY_MODEL_H
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/rcupdate.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/topology.h>
#include <linux/types.h>

/**
 * struct em_perf_state - Performance state of a performance domain
 * @frequency:	The frequency in KHz, for consistency with CPUFreq
 * @power:	The power consumed at this level (by 1 CPU or by a registered
 *		device). It can be a total power: static and dynamic.
 * @cost:	The cost coefficient associated with this level, used during
 *		energy calculation. Equal to: power * max_frequency / frequency
 * @flags:	see "em_perf_state flags" description below.
 */
struct em_perf_state {
	unsigned long frequency;
	unsigned long power;
	unsigned long cost;
	unsigned long flags;
};

/*
 * em_perf_state flags:
 *
 * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient. There is
 * another performance state in this em_perf_domain with a higher frequency
 * but a lower or equal power cost. Such inefficient states are ignored when
 * using the em_pd_get_efficient_*() functions.
 */
#define EM_PERF_STATE_INEFFICIENT BIT(0)
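
/*
 * Illustrative sketch (not part of the kernel API; the helper name is made
 * up): how the 'cost' coefficient documented above can be derived from
 * 'power' using cost = power * max_frequency / frequency, and how a state
 * matches the EM_PERF_STATE_INEFFICIENT definition when some higher
 * frequency state has a lower or equal cost. Assumes the table is sorted by
 * ascending frequency and that the products fit in an unsigned long.
 */
static inline void em_example_fill_costs(struct em_perf_state *table,
					 int nr_states)
{
	unsigned long max_freq = table[nr_states - 1].frequency;
	int i, j;

	for (i = 0; i < nr_states; i++)
		table[i].cost = table[i].power * max_freq / table[i].frequency;

	for (i = 0; i < nr_states; i++) {
		for (j = i + 1; j < nr_states; j++) {
			/* A cheaper (or equal) higher-frequency state exists. */
			if (table[j].cost <= table[i].cost) {
				table[i].flags |= EM_PERF_STATE_INEFFICIENT;
				break;
			}
		}
	}
}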

/**
 * struct em_perf_domain - Performance domain
 * @table:		List of performance states, in ascending frequency order
 * @nr_perf_states:	Number of performance states
 * @flags:		See "em_perf_domain flags"
 * @cpus:		Cpumask covering the CPUs of the domain. It's here
 *			for performance reasons to avoid potential cache
 *			misses during energy calculations in the scheduler
 *			and simplifies allocating/freeing that memory region.
 *
 * In the case of a CPU device, a "performance domain" represents a group of
 * CPUs whose performance is scaled together. All CPUs of a performance domain
 * must have the same micro-architecture. Performance domains often have
 * a 1-to-1 mapping with CPUFreq policies. For other devices, the @cpus
 * field is unused.
 */
struct em_perf_domain {
	struct em_perf_state *table;
	int nr_perf_states;
	unsigned long flags;
	unsigned long cpus[];
};

/*
 *  em_perf_domain flags:
 *
 *  EM_PERF_DOMAIN_MILLIWATTS: The power values are in milli-Watts, as opposed
 *  to an abstract scale.
 *
 *  EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
 *  energy consumption.
 *
 *  EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might be
 *  created by a platform missing real power information.
 */
#define EM_PERF_DOMAIN_MILLIWATTS BIT(0)
#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)

#define em_span_cpus(em) (to_cpumask((em)->cpus))
#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)
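
/*
 * Illustrative sketch (hypothetical helper, not part of this header): a
 * typical use of the accessors above. em_span_cpus() yields the domain's
 * cpumask and em_is_artificial() tells whether the power values are real
 * measurements or made-up platform data.
 */
static inline bool em_example_has_real_power(struct em_perf_domain *pd,
					     int cpu)
{
	/* Only trust the power numbers for CPUs in a non-artificial domain. */
	return cpumask_test_cpu(cpu, em_span_cpus(pd)) && !em_is_artificial(pd);
}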

#ifdef CONFIG_ENERGY_MODEL
#define EM_MAX_POWER 0xFFFF

/*
 * Increase the resolution of energy estimation calculations on 64-bit
 * architectures. The extra resolution improves the decisions made by EAS
 * for task placement when two performance domains provide similar energy
 * estimates (without the better resolution the values could be equal).
 *
 * The resolution is only increased when there are enough bits to allow it
 * (i.e. on 64-bit). The cost of the increased resolution on 32-bit is high
 * and the returns do not justify it.
 */
#ifdef CONFIG_64BIT
#define em_scale_power(p) ((p) * 1000)
#else
#define em_scale_power(p) (p)
#endif

struct em_data_callback {
	/**
	 * active_power() - Provide power at the next performance state of
	 *		a device
	 * @dev		: Device for which we do this operation (can be a CPU)
	 * @power	: Active power at the performance state
	 *		(modified)
	 * @freq	: Frequency at the performance state in kHz
	 *		(modified)
	 *
	 * active_power() must find the lowest performance state of 'dev' above
	 * 'freq' and update 'power' and 'freq' to the matching active power
	 * and frequency.
	 *
	 * In the case of CPUs, the power is that of a single CPU in the
	 * domain, expressed in milli-Watts or an abstract scale. It is
	 * expected to fit in the [0, EM_MAX_POWER] range.
	 *
	 * Return 0 on success.
	 */
	int (*active_power)(struct device *dev, unsigned long *power,
			    unsigned long *freq);

	/**
	 * get_cost() - Provide the cost at the given performance state of
	 *		a device
	 * @dev		: Device for which we do this operation (can be a CPU)
	 * @freq	: Frequency at the performance state in kHz
	 * @cost	: The cost value for the performance state
	 *		(modified)
	 *
	 * In the case of CPUs, the cost is that of a single CPU in the
	 * domain. It is expected to fit in the [0, EM_MAX_POWER] range due
	 * to internal usage in EAS calculations.
	 *
	 * Return 0 on success, or an appropriate error value in case of
	 * failure.
	 */
	int (*get_cost)(struct device *dev, unsigned long freq,
			unsigned long *cost);
};
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb)	\
	{ .active_power = _active_power_cb,		\
	  .get_cost = _cost_cb }
#define EM_DATA_CB(_active_power_cb)			\
		EM_ADV_DATA_CB(_active_power_cb, NULL)

struct em_perf_domain *em_cpu_get(int cpu);
struct em_perf_domain *em_pd_get(struct device *dev);
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				struct em_data_callback *cb, cpumask_t *span,
				bool milliwatts);
void em_dev_unregister_perf_domain(struct device *dev);
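
/*
 * Illustrative sketch of the registration flow (hypothetical driver code,
 * not part of this header): the driver provides an active_power() callback,
 * wraps it with EM_DATA_CB() and passes it to em_dev_register_perf_domain()
 * together with the CPUs it covers. The frequency/power table is made up;
 * a real driver would query its OPPs or firmware instead.
 */
static int em_example_active_power(struct device *dev, unsigned long *power,
				   unsigned long *freq)
{
	/* Hypothetical OPPs: { frequency in kHz, power in mW }, ascending. */
	static const unsigned long opps[][2] = {
		{  500000,  80 },
		{ 1000000, 200 },
		{ 1500000, 450 },
	};
	unsigned int i;

	/* Report the lowest OPP at or above the requested frequency. */
	for (i = 0; i < ARRAY_SIZE(opps); i++) {
		if (opps[i][0] >= *freq) {
			*freq = opps[i][0];
			*power = opps[i][1];
			return 0;
		}
	}

	return -EINVAL;
}

static inline int em_example_register(struct device *cpu_dev, cpumask_t *cpus)
{
	struct em_data_callback em_cb = EM_DATA_CB(em_example_active_power);

	/* Three states, power expressed in milli-Watts (milliwatts = true). */
	return em_dev_register_perf_domain(cpu_dev, 3, &em_cb, cpus, true);
}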

/**
 * em_pd_get_efficient_state() - Get an efficient performance state from the EM
 * @pd   : Performance domain for which we want an efficient frequency
 * @freq : Frequency to map with the EM
 *
 * It is called from the scheduler code quite frequently and as a consequence
 * does not implement any checks.
 *
 * Return: An efficient performance state, high enough to meet the @freq
 * requirement.
 */
static inline
struct em_perf_state *em_pd_get_efficient_state(struct em_perf_domain *pd,
						unsigned long freq)
{
	struct em_perf_state *ps;
	int i;

	for (i = 0; i < pd->nr_perf_states; i++) {
		ps = &pd->table[i];
		if (ps->frequency >= freq) {
			if (pd->flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
			    ps->flags & EM_PERF_STATE_INEFFICIENT)
				continue;
			break;
		}
	}

	return ps;
}
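
/*
 * Illustrative sketch (hypothetical helper): resolving a requested frequency
 * to the frequency the EM would actually run at. For example, with a table
 * of 500000/1000000/1500000 kHz where the 1000000 kHz state is flagged
 * EM_PERF_STATE_INEFFICIENT and the domain has
 * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES set, a request for 800000 kHz resolves
 * to 1500000 kHz rather than 1000000 kHz.
 */
static inline unsigned long em_example_resolve_freq(struct em_perf_domain *pd,
						    unsigned long freq)
{
	return em_pd_get_efficient_state(pd, freq)->frequency;
}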

/**
 * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
 *		performance domain
 * @pd		: performance domain for which energy has to be estimated
 * @max_util	: highest utilization among CPUs of the domain
 * @sum_util	: sum of the utilization of all CPUs in the domain
 * @allowed_cpu_cap	: maximum allowed CPU capacity for the @pd, which
 *			  might reflect reduced frequency (due to thermal)
 *
 * This function must be used only for CPU devices. There is no validation
 * that the EM is of CPU type or that its cpumask is allocated. It is called
 * from the scheduler code quite frequently, which is why there are no checks.
 *
 * Return: the sum of the energy consumed by the CPUs of the domain assuming
 * a capacity state satisfying the max utilization of the domain.
 */
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
				unsigned long max_util, unsigned long sum_util,
				unsigned long allowed_cpu_cap)
{
	unsigned long freq, scale_cpu;
	struct em_perf_state *ps;
	int cpu;

	if (!sum_util)
		return 0;

	/*
	 * In order to predict the performance state, map the utilization of
	 * the most utilized CPU of the performance domain to a requested
	 * frequency, like schedutil. Take also into account that the real
	 * frequency might be set lower (due to thermal capping). Thus, clamp
	 * max utilization to the allowed CPU capacity before calculating
	 * effective frequency.
	 */
	cpu = cpumask_first(to_cpumask(pd->cpus));
	scale_cpu = arch_scale_cpu_capacity(cpu);
	ps = &pd->table[pd->nr_perf_states - 1];

	max_util = map_util_perf(max_util);
	max_util = min(max_util, allowed_cpu_cap);
	freq = map_util_freq(max_util, ps->frequency, scale_cpu);

	/*
	 * Find the lowest performance state of the Energy Model above the
	 * requested frequency.
	 */
	ps = em_pd_get_efficient_state(pd, freq);

	/*
	 * The capacity of a CPU in the domain at the performance state (ps)
	 * can be computed as:
	 *
	 *             ps->freq * scale_cpu
	 *   ps->cap = --------------------                          (1)
	 *                 cpu_max_freq
	 *
	 * So, ignoring the costs of idle states (which are not available in
	 * the EM), the energy consumed by this CPU at that performance state
	 * is estimated as:
	 *
	 *             ps->power * cpu_util
	 *   cpu_nrg = --------------------                          (2)
	 *                   ps->cap
	 *
	 * since 'cpu_util / ps->cap' represents its percentage of busy time.
	 *
	 *   NOTE: Although the result of this computation actually is in
	 *         units of power, it can be manipulated as an energy value
	 *         over a scheduling period, since it is assumed to be
	 *         constant during that interval.
	 *
	 * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
	 * of two terms:
	 *
	 *             ps->power * cpu_max_freq   cpu_util
	 *   cpu_nrg = ------------------------ * ---------          (3)
	 *                    ps->freq            scale_cpu
	 *
	 * The first term is static, and is stored in the em_perf_state struct
	 * as 'ps->cost'.
	 *
	 * Since all CPUs of the domain have the same micro-architecture, they
	 * share the same 'ps->cost', and the same CPU capacity. Hence, the
	 * total energy of the domain (which is the simple sum of the energy of
	 * all of its CPUs) can be factorized as:
	 *
	 *            ps->cost * \Sum cpu_util
	 *   pd_nrg = ------------------------                       (4)
	 *                  scale_cpu
	 */
	return ps->cost * sum_util / scale_cpu;
}
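
/*
 * Illustrative sketch (hypothetical caller, simplified from how EAS uses
 * this API): estimate the energy of the performance domain of @cpu for one
 * candidate task placement. The utilization values would normally come from
 * the scheduler's PELT signals; here they are plain parameters, and thermal
 * capping is ignored by allowing the full CPU capacity.
 */
static inline unsigned long em_example_estimate(int cpu, unsigned long max_util,
						unsigned long sum_util)
{
	struct em_perf_domain *pd = em_cpu_get(cpu);

	if (!pd)
		return 0;

	return em_cpu_energy(pd, max_util, sum_util,
			     arch_scale_cpu_capacity(cpu));
}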

/**
 * em_pd_nr_perf_states() - Get the number of performance states of a perf.
 *				domain
 * @pd		: performance domain for which this must be done
 *
 * Return: the number of performance states in the performance domain table
 */
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
	return pd->nr_perf_states;
}

#else
struct em_data_callback {};
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
#define EM_DATA_CB(_active_power_cb) { }
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)

static inline
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				struct em_data_callback *cb, cpumask_t *span,
				bool milliwatts)
{
	return -EINVAL;
}
static inline void em_dev_unregister_perf_domain(struct device *dev)
{
}
static inline struct em_perf_domain *em_cpu_get(int cpu)
{
	return NULL;
}
static inline struct em_perf_domain *em_pd_get(struct device *dev)
{
	return NULL;
}
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
			unsigned long max_util, unsigned long sum_util,
			unsigned long allowed_cpu_cap)
{
	return 0;
}
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
	return 0;
}
#endif

#endif