cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cpupri.c (8710B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/cpupri.c
 *
 *  CPU priority management
 *
 *  Copyright (C) 2007-2008 Novell
 *
 *  Author: Gregory Haskins <ghaskins@novell.com>
 *
 *  This code tracks the priority of each CPU so that global migration
 *  decisions are easy to calculate.  Each CPU can be in a state as follows:
 *
 *                 (INVALID), NORMAL, RT1, ... RT99, HIGHER
 *
 *  going from the lowest priority to the highest.  CPUs in the INVALID state
 *  are not eligible for routing.  The system maintains this state with
 *  a 2 dimensional bitmap (the first for priority class, the second for CPUs
 *  in that class).  Therefore a typical application without affinity
 *  restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
 *  searches).  For tasks with affinity restrictions, the algorithm has a
 *  worst case complexity of O(min(101, nr_domcpus)), though the scenario that
 *  yields the worst case search is fairly contrived.
 */
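
/*
 * Illustrative sketch: the "two bit searches" mentioned above roughly
 * amount to scanning the priority classes in order and then picking any
 * CPU out of that class's mask, along the lines of
 *
 *	for (idx = 0; idx < task_pri; idx++) {
 *		struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
 *
 *		if (!atomic_read(&vec->count))
 *			continue;
 *		cpu = cpumask_any_and(&p->cpus_mask, vec->mask);
 *		if (cpu < nr_cpu_ids)
 *			return cpu;
 *	}
 *
 * which is the shape of cpupri_find_fitness()/__cpupri_find() below, minus
 * the memory-barrier and fitness handling.
 */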

/*
 * p->rt_priority   p->prio   newpri   cpupri
 *
 *				  -1       -1 (CPUPRI_INVALID)
 *
 *				  99        0 (CPUPRI_NORMAL)
 *
 *		1        98       98        1
 *	      ...
 *	       49        50       50       49
 *	       50        49       49       50
 *	      ...
 *	       99         0        0       99
 *
 *				 100	  100 (CPUPRI_HIGHER)
 */
static int convert_prio(int prio)
{
	int cpupri;

	switch (prio) {
	case CPUPRI_INVALID:
		cpupri = CPUPRI_INVALID;	/* -1 */
		break;

	case 0 ... 98:
		cpupri = MAX_RT_PRIO-1 - prio;	/* 1 ... 99 */
		break;

	case MAX_RT_PRIO-1:
		cpupri = CPUPRI_NORMAL;		/*  0 */
		break;

	case MAX_RT_PRIO:
		cpupri = CPUPRI_HIGHER;		/* 100 */
		break;
	}

	return cpupri;
}

static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
				struct cpumask *lowest_mask, int idx)
{
	struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
	int skip = 0;

	if (!atomic_read(&(vec)->count))
		skip = 1;
	/*
	 * When looking at the vector, we need to read the counter,
	 * do a memory barrier, then read the mask.
	 *
	 * Note: This is still all racy, but we can deal with it.
	 *  Ideally, we only want to look at masks that are set.
	 *
	 *  If a mask is not set, then the only thing wrong is that we
	 *  did a little more work than necessary.
	 *
	 *  If we read a zero count but the mask is set, because of the
	 *  memory barriers, that can only happen when the highest prio
	 *  task for a run queue has left the run queue, in which case,
	 *  it will be followed by a pull. If the task we are processing
	 *  fails to find a proper place to go, that pull request will
	 *  pull this task if the run queue is running at a lower
	 *  priority.
	 */
	smp_rmb();

	/* Need to do the rmb for every iteration */
	if (skip)
		return 0;

	if (cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids)
		return 0;

	if (lowest_mask) {
		cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);

		/*
		 * We have to ensure that we have at least one bit
		 * still set in the array, since the map could have
		 * been concurrently emptied between the first and
		 * second reads of vec->mask.  If we hit this
		 * condition, simply act as though we never hit this
		 * priority level and continue on.
		 */
		if (cpumask_empty(lowest_mask))
			return 0;
	}

	return 1;
}

int cpupri_find(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask)
{
	return cpupri_find_fitness(cp, p, lowest_mask, NULL);
}

/**
 * cpupri_find_fitness - find the best (lowest-pri) CPU in the system
 * @cp: The cpupri context
 * @p: The task
 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
 * @fitness_fn: A pointer to a function that checks whether the CPU fits
 *              specific criteria, so that we only return such CPUs.
 *
 * Note: This function returns the recommended CPUs as calculated during the
 * current invocation.  By the time the call returns, the CPUs may have in
 * fact changed priorities any number of times.  While not ideal, it is not
 * an issue of correctness since the normal rebalancer logic will correct
 * any discrepancies created by racing against the uncertainty of the current
 * priority configuration.
 *
 * Return: (int)bool - CPUs were found
 */
int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
		struct cpumask *lowest_mask,
		bool (*fitness_fn)(struct task_struct *p, int cpu))
{
	int task_pri = convert_prio(p->prio);
	int idx, cpu;

	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);

	for (idx = 0; idx < task_pri; idx++) {

		if (!__cpupri_find(cp, p, lowest_mask, idx))
			continue;

		if (!lowest_mask || !fitness_fn)
			return 1;

		/* Ensure the capacity of the CPUs fits the task */
		for_each_cpu(cpu, lowest_mask) {
			if (!fitness_fn(p, cpu))
				cpumask_clear_cpu(cpu, lowest_mask);
		}

		/*
		 * If no CPU at the current priority can fit the task,
		 * continue looking.
		 */
		if (cpumask_empty(lowest_mask))
			continue;

		return 1;
	}

	/*
	 * If we failed to find a fitting lowest_mask, kick off a new search
	 * but without taking into account any fitness criteria this time.
	 *
	 * This rule favours honouring priority over fitting the task in the
	 * correct CPU (Capacity Awareness being the only user now).
	 * The idea is that if a higher priority task can run, then it should
	 * run even if this ends up being on an unfitting CPU.
	 *
	 * The cost of this trade-off is not entirely clear and will probably
	 * be good for some workloads and bad for others.
	 *
	 * The main idea here is that if some CPUs were over-committed, we try
	 * to spread, which is what the scheduler traditionally did. Sys admins
	 * must do proper RT planning to avoid overloading the system if they
	 * really care.
	 */
	if (fitness_fn)
		return cpupri_find(cp, p, lowest_mask);

	return 0;
}
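
/*
 * Usage sketch (assumed caller, for illustration): the RT scheduler's
 * find_lowest_rq() is the expected user, calling roughly
 *
 *	cpupri_find_fitness(&task_rq(task)->rd->cpupri, task, lowest_mask,
 *			    rt_task_fits_capacity);
 *
 * on asymmetric-capacity systems, and plain cpupri_find() (i.e. a NULL
 * fitness_fn) everywhere else.
 */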

/**
 * cpupri_set - update the CPU priority setting
 * @cp: The cpupri context
 * @cpu: The target CPU
 * @newpri: The priority (INVALID,NORMAL,RT1-RT99,HIGHER) to assign to this CPU
 *
 * Note: Assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpupri_set(struct cpupri *cp, int cpu, int newpri)
{
	int *currpri = &cp->cpu_to_pri[cpu];
	int oldpri = *currpri;
	int do_mb = 0;

	newpri = convert_prio(newpri);

	BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);

	if (newpri == oldpri)
		return;

	/*
	 * If the CPU was currently mapped to a different value, we
	 * need to map it to the new value then remove the old value.
	 * Note, we must add the new value first, otherwise we risk the
	 * cpu being missed by the priority loop in cpupri_find.
	 */
	if (likely(newpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];

		cpumask_set_cpu(cpu, vec->mask);
		/*
		 * When adding a new vector, we update the mask first,
		 * do a write memory barrier, and then update the count, to
		 * make sure the vector is visible when count is set.
		 */
		smp_mb__before_atomic();
		atomic_inc(&(vec)->count);
		do_mb = 1;
	}
	if (likely(oldpri != CPUPRI_INVALID)) {
		struct cpupri_vec *vec  = &cp->pri_to_cpu[oldpri];

		/*
		 * Because the order of modification of the vec->count
		 * is important, we must make sure that the update
		 * of the new prio is seen before we decrement the
		 * old prio. This makes sure that the loop sees
		 * one or the other when we raise the priority of
		 * the run queue. We don't care about when we lower the
		 * priority, as that will trigger an rt pull anyway.
		 *
		 * We only need to do a memory barrier if we updated
		 * the new priority vec.
		 */
		if (do_mb)
			smp_mb__after_atomic();

		/*
		 * When removing from the vector, we decrement the counter
		 * first, do a memory barrier, and then clear the mask.
		 */
		atomic_dec(&(vec)->count);
		smp_mb__after_atomic();
		cpumask_clear_cpu(cpu, vec->mask);
	}

	*currpri = newpri;
}
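
/*
 * Ordering sketch (derived from the comments above): the barriers in
 * cpupri_set() pair with the smp_rmb() in __cpupri_find():
 *
 *	cpupri_set()				__cpupri_find()
 *	  set cpu bit in new vec->mask		  atomic_read(&vec->count)
 *	  smp_mb__before_atomic()		  smp_rmb()
 *	  atomic_inc(new vec->count)		  read vec->mask
 *	  smp_mb__after_atomic()
 *	  atomic_dec(old vec->count)
 *	  smp_mb__after_atomic()
 *	  clear cpu bit in old vec->mask
 *
 * A searcher that sees the incremented count is thus guaranteed to also see
 * the newly set mask bit; one that sees a stale mask bit merely does a bit
 * of extra work, as explained in __cpupri_find().
 */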

/**
 * cpupri_init - initialize the cpupri structure
 * @cp: The cpupri context
 *
 * Return: 0 on success, -ENOMEM on memory allocation failure.
 */
int cpupri_init(struct cpupri *cp)
{
	int i;

	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
		struct cpupri_vec *vec = &cp->pri_to_cpu[i];

		atomic_set(&vec->count, 0);
		if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
			goto cleanup;
	}

	cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL);
	if (!cp->cpu_to_pri)
		goto cleanup;

	for_each_possible_cpu(i)
		cp->cpu_to_pri[i] = CPUPRI_INVALID;

	return 0;

cleanup:
	for (i--; i >= 0; i--)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
	return -ENOMEM;
}

/**
 * cpupri_cleanup - clean up the cpupri structure
 * @cp: The cpupri context
 */
void cpupri_cleanup(struct cpupri *cp)
{
	int i;

	kfree(cp->cpu_to_pri);
	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
		free_cpumask_var(cp->pri_to_cpu[i].mask);
}
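
/*
 * Usage sketch (assumed callers, for illustration): each root domain owns a
 * cpupri context, so the expected lifecycle is roughly
 *
 *	init_rootdomain():	cpupri_init(&rd->cpupri);
 *	free_rootdomain():	cpupri_cleanup(&rd->cpupri);
 *
 * with cpupri_set() invoked by the RT scheduling class whenever a run
 * queue's highest RT priority changes.
 */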