cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

u64_stats_sync.h (6728B)


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * Protect against 64-bit values tearing on 32-bit architectures. This is
 * typically used for statistics read/update in different subsystems.
 *
 * Key points:
 *
 * -  Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
 * -  The whole thing is a no-op on 64-bit architectures.
 *
 * Usage constraints:
 *
 * 1) Write side must ensure mutual exclusion, or one seqcount update could
 *    be lost, thus blocking readers forever.
 *
 * 2) Write side must disable preemption, or a seqcount reader can preempt the
 *    writer and also spin forever.
 *
 * 3) Write side must use the _irqsave() variant if other writers, or a reader,
 *    can be invoked from an IRQ context.
 *
 * 4) If a reader fetches several counters, there is no guarantee the whole
 *    values are consistent w.r.t. each other (remember key point #2:
 *    seqcounts are not used on 64-bit architectures).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
 *    pure reads.
 *
 * 6) Readers must use the u64_stats_fetch_{begin,retry}_irq() pair if the
 *    stats might be updated from a hardirq or softirq context (remember key
 *    point #1: seqcounts are not used on UP kernels). 32-bit UP stat readers
 *    could read corrupted 64-bit values otherwise.
 *
 * Usage:
 *
 * The stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per-cpu data is used [in a non-preemptible context]):
 *
 *   spin_lock_bh(...) or other synchronization to get exclusive access
 *   ...
 *   u64_stats_update_begin(&stats->syncp);
 *   u64_stats_add(&stats->bytes64, len); // non-atomic operation
 *   u64_stats_inc(&stats->packets64);    // non-atomic operation
 *   u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no consistency guarantee across several of
 * them):
 *
 * u64 tbytes, tpackets;
 * unsigned int start;
 *
 * do {
 *         start = u64_stats_fetch_begin(&stats->syncp);
 *         tbytes = u64_stats_read(&stats->bytes64); // non-atomic operation
 *         tpackets = u64_stats_read(&stats->packets64); // non-atomic operation
 * } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 *
 * Example of use: drivers/net/loopback.c, using per-CPU containers in a
 * BH-disabled context.
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	seqcount_t	seq;
#endif
};
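
/*
 * Example: a stats container embedding the sync object next to the
 * counters it protects. "struct tx_stats" and its fields are illustrative
 * names, not part of this header:
 *
 *   struct tx_stats {
 *           u64_stats_t             bytes;
 *           u64_stats_t             packets;
 *           struct u64_stats_sync   syncp;
 *   };
 */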

#if BITS_PER_LONG == 64
#include <asm/local64.h>

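/*
 * On 64-bit architectures, aligned u64 loads and stores are naturally
 * atomic, so no seqcount is needed; local64_t keeps the updates cheap
 * (plain long operations on most architectures).
 */
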
typedef struct {
	local64_t	v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return local64_read(&p->v);
}

static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
	local64_set(&p->v, val);
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	local64_add(val, &p->v);
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	local64_inc(&p->v);
}

#else

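/*
 * On 32-bit architectures a u64 access compiles into two 32-bit accesses
 * and can tear; these plain operations rely on the seqcount in
 * struct u64_stats_sync so that readers can detect a torn update.
 */
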
typedef struct {
	u64		v;
} u64_stats_t;

static inline u64 u64_stats_read(const u64_stats_t *p)
{
	return p->v;
}

static inline void u64_stats_set(u64_stats_t *p, u64 val)
{
	p->v = val;
}

static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
{
	p->v += val;
}

static inline void u64_stats_inc(u64_stats_t *p)
{
	p->v++;
}
#endif

#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
#define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
#else
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
}
#endif

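/*
 * Example of initialization for per-CPU stats (illustrative names; this
 * mirrors the pattern used by drivers such as loopback):
 *
 *   int i;
 *
 *   for_each_possible_cpu(i) {
 *           struct tx_stats *stats = per_cpu_ptr(dev->tstats, i);
 *
 *           u64_stats_init(&stats->syncp);
 *   }
 */
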
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	write_seqcount_end(&syncp->seq);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
#endif
}

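/*
 * Example writer (illustrative; assumes exclusive access is already
 * guaranteed, e.g. by running with BH disabled on the owning CPU):
 *
 *   u64_stats_update_begin(&stats->syncp);
 *   u64_stats_add(&stats->bytes, skb->len);
 *   u64_stats_inc(&stats->packets);
 *   u64_stats_update_end(&stats->syncp);
 */
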
static inline unsigned long
u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
	unsigned long flags = 0;

#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	else
		local_irq_save(flags);
	write_seqcount_begin(&syncp->seq);
#endif
	return flags;
}

static inline void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
				unsigned long flags)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	write_seqcount_end(&syncp->seq);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	else
		local_irq_restore(flags);
#endif
}

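/*
 * Example writer for the case where another writer, or a reader, may run
 * from IRQ context (illustrative names):
 *
 *   unsigned long flags;
 *
 *   flags = u64_stats_update_begin_irqsave(&stats->syncp);
 *   u64_stats_inc(&stats->dropped);
 *   u64_stats_update_end_irqrestore(&stats->syncp, flags);
 */
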
static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	return read_seqcount_retry(&syncp->seq, start);
#else
	return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
					 unsigned int start)
{
#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
	preempt_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

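/*
 * Example reader summing per-CPU stats (illustrative; assumes "dev->tstats"
 * is a per-CPU pointer to the hypothetical struct tx_stats above):
 *
 *   u64 tbytes = 0, tpackets = 0;
 *   int cpu;
 *
 *   for_each_possible_cpu(cpu) {
 *           const struct tx_stats *stats = per_cpu_ptr(dev->tstats, cpu);
 *           unsigned int start;
 *           u64 bytes, packets;
 *
 *           do {
 *                   start = u64_stats_fetch_begin(&stats->syncp);
 *                   bytes = u64_stats_read(&stats->bytes);
 *                   packets = u64_stats_read(&stats->packets);
 *           } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 *           tbytes += bytes;
 *           tpackets += packets;
 *   }
 */
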
/*
 * In case irq handlers can update u64 counters, readers can use the following
 * helpers:
 * - 32-bit SMP arches use seqcount protection, irq safe.
 * - 32-bit UP must disable irqs.
 * - 64-bit arches have no problem atomically reading u64 values, irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
	preempt_disable();
#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
					     unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
	preempt_enable();
#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_enable();
#endif
	return __u64_stats_fetch_retry(syncp, start);
}

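/*
 * Example reader for stats that may be updated from hardirq or softirq
 * context (illustrative names):
 *
 *   unsigned int start;
 *   u64 dropped;
 *
 *   do {
 *           start = u64_stats_fetch_begin_irq(&stats->syncp);
 *           dropped = u64_stats_read(&stats->dropped);
 *   } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 */
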
#endif /* _LINUX_U64_STATS_SYNC_H */