cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

percpu_counter.c (6581B)


// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static const struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static const struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

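/*
 * Note added for this annotated listing: percpu_counter_set() zeroes every
 * possible CPU's local delta and installs @amount as the global count, all
 * under fbc->lock so it cannot race with a concurrent slow-path fold.
 */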
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock whereas the fast path uses
 * this_cpu_add(), which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (abs(count) >= batch) {
		unsigned long flags;
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);
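
/*
 * Illustrative sketch, not part of the upstream file: a hypothetical event
 * counter using an explicit batch of 32. Most increments take the lockless
 * this_cpu_add() fast path above; only when a CPU's local delta reaches the
 * batch does the slow path take fbc->lock and fold it into fbc->count.
 */
static inline void example_count_event(struct percpu_counter *nr_events)
{
	percpu_counter_add_batch(nr_events, 1, 32);
}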

/*
 * For a percpu_counter with a big batch, the deviation of its count can be
 * large. Sometimes that deviation needs to be reduced, e.g. when the
 * counter's batch is decreased at runtime for better accuracy, which can be
 * achieved by running this sync function on each CPU.
 */
void percpu_counter_sync(struct percpu_counter *fbc)
{
	unsigned long flags;
	s64 count;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	count = __this_cpu_read(*fbc->counters);
	fbc->count += count;
	__this_cpu_sub(*fbc->counters, count);
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);
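
/*
 * Illustrative sketch, not part of the upstream file:
 * percpu_counter_sync() only folds the delta of the CPU it runs on, so a
 * hypothetical caller that just shrank a counter's batch could flush all
 * CPUs with an IPI broadcast via on_each_cpu() from <linux/smp.h>.
 */
static void example_sync_one(void *info)
{
	percpu_counter_sync(info);
}

static void example_sync_all(struct percpu_counter *fbc)
{
	on_each_cpu(example_sync_one, fbc, 1);
}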

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
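
/*
 * Illustrative sketch, not part of the upstream file: the cheap
 * percpu_counter_read() just returns fbc->count and may be off by up to
 * roughly batch * num_online_cpus(), while percpu_counter_sum() pays for
 * the lock and the per-cpu walk to get an exact value. A hypothetical
 * caller picks per call site:
 */
static s64 example_read_total(struct percpu_counter *fbc, bool precise)
{
	return precise ? percpu_counter_sum(fbc) : percpu_counter_read(fbc);
}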

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
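
/*
 * Illustrative lifecycle sketch, not part of the upstream file: users
 * normally call the percpu_counter_init() wrapper from
 * <linux/percpu_counter.h> (it supplies the lockdep class key for
 * __percpu_counter_init() above) and must pair it with
 * percpu_counter_destroy() to free the per-cpu storage.
 */
static struct percpu_counter example_counter;

static int example_setup(void)
{
	return percpu_counter_init(&example_counter, 0, GFP_KERNEL);
}

static void example_teardown(void)
{
	percpu_counter_destroy(&example_counter);
}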

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

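/*
 * Note added for this annotated listing: the hotplug callback below keeps
 * the global batch at max(32, 2 * num_online_cpus()), so larger machines
 * buffer more per CPU before touching the shared count.
 */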
static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}

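/*
 * Note added for this annotated listing: when a CPU dies, its local delta
 * would otherwise be stranded, so the callback below walks every registered
 * counter, folds the dead CPU's delta into the global count and re-zeroes it.
 */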
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less.
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);
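
/*
 * Illustrative sketch, not part of the upstream file: a hypothetical limit
 * check using the default percpu_counter_batch. Far from the limit the
 * rough read decides immediately; only near the boundary does
 * __percpu_counter_compare() fall back to the slow, precise sum.
 */
static bool example_over_limit(struct percpu_counter *used, s64 limit)
{
	return __percpu_counter_compare(used, limit, percpu_counter_batch) > 0;
}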

static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);