cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

flex_proportions.c (7346B)


// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: given different types of events, measure the
 * proportion of each event type over time. The proportions are measured with
 * exponentially decaying history to give smooth transitions. A formula
 * expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is j's number of events in the i-th last time period and x_i
 * is the total number of events in the i-th last time period.
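 *
 * For example (with made-up numbers): if type j saw 4 events in the current
 * period and 2 in the previous one, out of 8 and 4 total events respectively,
 * then p_{j} = (4/2 + 2/4) / (8/2 + 4/4) = 2.5/5 = 1/2.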
 *
 * Note that the p_{j} are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be straightforwardly computed by maintaining the
 * denominator (let's call it 'd') and, for each event type, its numerator
 * (let's call it 'n_j'). When an event of type 'j' happens, we simply need
 * to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator of
 * event j lazily, when someone asks for the proportion of event j or when
 * event j occurs. This can be trivially implemented by remembering the last
 * period in which the proportion of type j was updated.
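 *
 * For example, if the proportion of type j was last updated in period 5 and
 * the current period is 8, the next access to n_j first shifts it right by
 * 3 before using it.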
 */
#include <linux/flex_proportions.h>

int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1, gfp);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	s64 events;
	unsigned long flags;

	local_irq_save(flags);
	events = percpu_counter_sum(&p->events);
	/*
	 * Don't do anything if there are no events.
	 */
	if (events <= 1) {
		local_irq_restore(flags);
		return false;
	}
	write_seqcount_begin(&p->sequence);
	if (periods < 64)
		events -= events >> periods;
	/* Use addition to avoid losing events happening between sum and set */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);
	local_irq_restore(flags);

	return true;
}

/*
 * ---- SINGLE ----
 */

int fprop_local_init_single(struct fprop_local_single *pl)
{
	pl->events = 0;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_single(struct fprop_local_single *pl)
{
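	/* Nothing to free: the single variant has no dynamically allocated state. */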
}

static void fprop_reflect_period_single(struct fprop_global *p,
					struct fprop_local_single *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG)
		pl->events >>= period - pl->period;
	else
		pl->events = 0;
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	fprop_reflect_period_single(p, pl);
	pl->events++;
	percpu_counter_add(&p->events, 1);
}

/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
			   struct fprop_local_single *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_single(p, pl);
		num = pl->events;
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}

/*
 * ---- PERCPU ----
 */
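/* Batch size for the local percpu counter; grows with the number of CPU ids */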
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
	int err;

	err = percpu_counter_init(&pl->events, 0, gfp);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		percpu_counter_add_batch(&pl->events,
			-val + (val >> (period-pl->period)), PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
		long nr)
{
	fprop_reflect_period_percpu(p, pl);
	percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
	percpu_counter_add(&p->events, nr);
}

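/* Return fraction of events of type pl */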
void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}

/*
 * Like __fprop_add_percpu() except that the event is counted only if the
 * given type has a fraction smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_add_percpu_max(struct fprop_global *p,
		struct fprop_local_percpu *pl, int max_frac, long nr)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;
		s64 tmp;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		/* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
		tmp = (u64)denominator * max_frac -
					((u64)numerator << FPROP_FRAC_SHIFT);
		if (tmp < 0) {
			/* Maximum fraction already exceeded? */
			return;
		} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
			/* Add just enough for the fraction to saturate */
			nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
					FPROP_FRAC_BASE - max_frac);
		}
	}

	__fprop_add_percpu(p, pl, nr);
}
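
For reference, a minimal usage sketch of the percpu API (not part of the file
above; all example_* names and the "device" framing are illustrative): one
fprop_global counts all events, one fprop_local_percpu counts a single event
type, fprop_new_period() is called periodically to age the history, and
fprop_fraction_percpu() reads back the current share. The kernel's writeback
code uses this machinery in a similar way to apportion writeout completions
among devices.

	#include <linux/flex_proportions.h>
	#include <linux/printk.h>

	static struct fprop_global completions;	/* all events, all types */
	static struct fprop_local_percpu dev_events;	/* events of one device */

	static int example_init(void)
	{
		int err = fprop_global_init(&completions, GFP_KERNEL);

		if (err)
			return err;
		err = fprop_local_init_percpu(&dev_events, GFP_KERNEL);
		if (err)
			fprop_global_destroy(&completions);
		return err;
	}

	/* Count one event for the device; irqs disabled around the raw helper */
	static void example_event(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		__fprop_add_percpu(&completions, &dev_events, 1);
		local_irq_restore(flags);
	}

	/* Age the history; called periodically, e.g. from a timer */
	static void example_tick(void)
	{
		fprop_new_period(&completions, 1);
	}

	/* Read the device's current share of all events */
	static void example_report(void)
	{
		unsigned long num, den;

		fprop_fraction_percpu(&completions, &dev_events, &num, &den);
		pr_info("device share: %lu/%lu\n", num, den);
	}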