cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

cpumask.c (7226B)


// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);
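
/*
 * Usage sketch (hypothetical caller, not part of this file): walk to the
 * next online CPU after the current one, wrapping back to the lowest set
 * bit when the end of the mask is reached.
 *
 *	unsigned int cpu;
 *
 *	cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
 *	if (cpu >= nr_cpu_ids)		// ran past the last set bit
 *		cpu = cpumask_first(cpu_online_mask);
 */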

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
		nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);
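
/*
 * Usage sketch (hypothetical caller): find the next CPU that is both in a
 * task's allowed set and online, without building the intersection in a
 * temporary mask first. use_cpu() is illustrative.
 *
 *	int cpu = cpumask_next_and(prev_cpu, p->cpus_ptr, cpu_online_mask);
 *	if (cpu < nr_cpu_ids)
 *		use_cpu(cpu);		// cpu is set in both masks
 */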

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
EXPORT_SYMBOL(cpumask_any_but);
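
/*
 * Usage sketch (hypothetical caller): pick a remote CPU for a cross-call,
 * avoiding the local one. Preemption must be disabled for
 * smp_processor_id() to be stable here.
 *
 *	unsigned int target;
 *
 *	target = cpumask_any_but(cpu_online_mask, smp_processor_id());
 *	if (target >= nr_cpu_ids)
 *		return -ENODEV;		// we are the only online CPU
 */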

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
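
/*
 * This helper backs the for_each_cpu_wrap() iterator in <linux/cpumask.h>.
 * A sketch of typical use (hypothetical caller; do_work() is illustrative):
 *
 *	int cpu;
 *
 *	// visit every CPU in @mask exactly once, starting at @start
 *	for_each_cpu_wrap(cpu, mask, start)
 *		do_work(cpu);
 */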

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
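
/*
 * Typical cpumask_var_t lifecycle (sketch, hypothetical caller; some_mask
 * stands in for a caller-provided mask):
 *
 *	cpumask_var_t tmp;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_and(tmp, some_mask, cpu_online_mask);
 *	...
 *	free_cpumask_var(tmp);
 *
 * With CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is a one-element array on
 * the stack and the allocation calls become nops that always succeed.
 */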

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local numa cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa-aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		for_each_cpu(cpu, cpu_online_mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
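
/*
 * Usage sketch (hypothetical driver): spread per-queue interrupts across
 * CPUs, preferring those on the device's NUMA node. queue_irq() is
 * illustrative; dev_to_node() and irq_set_affinity_hint() are real APIs.
 *
 *	for (i = 0; i < nr_queues; i++) {
 *		unsigned int cpu = cpumask_local_spread(i, dev_to_node(dev));
 *
 *		irq_set_affinity_hint(queue_irq(i), cpumask_of(cpu));
 *	}
 */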

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within *src1p & *src2p.
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);

int cpumask_any_distribute(const struct cpumask *srcp)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next(prev, srcp);
	if (next >= nr_cpu_ids)
		next = cpumask_first(srcp);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);
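
/*
 * Usage sketch (hypothetical caller): pick a CPU from a pool for each new
 * work item; because the previous pick is remembered per-CPU above,
 * repeated calls rotate through the pool instead of piling onto one CPU.
 * pool_mask, wq and work stand in for caller-provided objects.
 *
 *	int cpu = cpumask_any_and_distribute(pool_mask, cpu_online_mask);
 *	if (cpu < nr_cpu_ids)
 *		queue_work_on(cpu, wq, work);
 */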