cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

topology.h (6846B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
enum {
	#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
	#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
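
/*
 * Illustration (not part of the original header): the two SD_FLAG
 * definitions above form an x-macro. Every SD_FLAG(name, mflags)
 * entry in sd_flags.h is expanded twice, first into an index, then
 * into a power-of-two bit derived from that index. A minimal sketch,
 * assuming a hypothetical sd_flags.h that lists only SD_FOO and
 * SD_BAR, would expand to:
 *
 *	enum { __SD_FOO, __SD_BAR, __SD_FLAG_CNT };	// indexes 0, 1
 *	enum { SD_FOO = 1 << __SD_FOO,			// == 0x1
 *	       SD_BAR = 1 << __SD_BAR };		// == 0x2
 *
 * The index and bit lists cannot drift apart, since both are
 * generated from the same sd_flags.h entries.
 */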

#ifdef CONFIG_SCHED_DEBUG

struct sd_flag_debug {
	unsigned int meta_flags;
	char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];

#endif

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}
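
/*
 * Illustration (not part of the original header): SD_ATTR_INIT is a
 * compound literal, so callers can value-initialize an attribute
 * block directly:
 *
 *	struct sched_domain_attr attr = SD_ATTR_INIT;
 *
 * The relax_domain_level of -1 requests the system default level
 * when the attributes are later applied via partition_sched_domains().
 */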

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int imb_numa_nr;	/* Nr running tasks that allows a NUMA imbalance */

	int nohz_idle;			/* NOHZ IDLE status */
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long last_decay_max_lb_cost;

	u64 avg_scan_cost;		/* select_idle_sibling */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
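
/*
 * Illustration (not part of the original header): sched_domain_span()
 * reinterprets the trailing span[] bitmap as a struct cpumask, so a
 * domain's CPUs can be walked with the ordinary cpumask iterators. A
 * minimal sketch, assuming sd points at a valid domain (e.g. looked
 * up under rcu_read_lock()):
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		pr_info("CPU %d belongs to this domain\n", cpu);
 *
 * sd->span_weight caches cpumask_weight(sched_domain_span(sd)).
 */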

extern void partition_sched_domains_locked(int ndoms_new,
					   cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
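
/*
 * Illustration (not part of the original header): a typical caller
 * (cpuset-style code) allocates a fresh domain array, fills in the
 * masks, and hands the array over; partition_sched_domains() takes
 * ownership and frees the previous array internally, so no matching
 * free_sched_domains() appears at the call site:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], cpu_active_mask);
 *		partition_sched_domains(1, doms, NULL);
 *	}
 */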

bool cpus_share_cache(int this_cpu, int that_cpu);

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP	0x01

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;
#endif
};

extern void set_sched_topology(struct sched_domain_topology_level *tl);

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif
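
/*
 * Illustration (not part of the original header): an architecture
 * overrides the scheduler's view of the machine by passing a table
 * of levels, terminated by an entry with a NULL mask function, to
 * set_sched_topology(). A sketch mirroring the shape of the default
 * table in kernel/sched/topology.c (my_topology is a hypothetical
 * name):
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 *
 * SD_INIT_NAME() only emits the .name initializer under
 * CONFIG_SCHED_DEBUG, so the same table compiles either way.
 */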

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
			       struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
#else
static inline void rebuild_sched_domains_energy(void)
{
}
#endif

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *      max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
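
/*
 * Illustration (not part of the original header): on an asymmetric
 * system the formula above scales every CPU against the fastest one.
 * Assuming a hypothetical big.LITTLE machine where a big core has
 * max_perf 2000 and a little core 1000, with SCHED_CAPACITY_SCALE
 * being 1024:
 *
 *	big:    2000 / 2000 * 1024 = 1024
 *	little: 1000 / 2000 * 1024 =  512
 *
 * The generic fallback above reports full capacity for every CPU,
 * which is correct on symmetric machines.
 */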

#ifndef arch_scale_thermal_pressure
static __always_inline
unsigned long arch_scale_thermal_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_update_thermal_pressure
static __always_inline
void arch_update_thermal_pressure(const struct cpumask *cpus,
				  unsigned long capped_frequency)
{ }
#endif

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */