cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux
Log | Files | Refs | README | LICENSE | sfeed.txt

cnt32_to_63.h (3545B)


      1/* SPDX-License-Identifier: GPL-2.0-only */
      2/*
      3 *  Extend a 32-bit counter to 63 bits
      4 *
      5 *  Author:	Nicolas Pitre
      6 *  Created:	December 3, 2006
      7 *  Copyright:	MontaVista Software, Inc.
      8 */
      9
     10#ifndef __LINUX_CNT32_TO_63_H__
     11#define __LINUX_CNT32_TO_63_H__
     12
     13#include <linux/compiler.h>
     14#include <linux/types.h>
     15#include <asm/byteorder.h>
     16
/* this is used only to give gcc a clue about good code generation */
union cnt32_to_63 {
	struct {
		/*
		 * Field order follows the machine byte order so that ->val
		 * reads @lo as bits 0-31 and @hi as bits 32-63 of the same
		 * 64-bit value.  In the cnt32_to_63() macro below, @lo holds
		 * the sampled hardware counter and @hi the in-memory
		 * extension word.
		 */
#if defined(__LITTLE_ENDIAN)
		u32 lo, hi;
#elif defined(__BIG_ENDIAN)
		u32 hi, lo;
#endif
	};
	u64 val;	/* both halves viewed together as one 64-bit value */
};
     28
     29
/**
 * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter
 * @cnt_lo: The low part of the counter
 *
 * Many hardware clock counters are only 32 bits wide and therefore have
 * a relatively short period making wrap-arounds rather frequent.  This
 * is a problem when implementing sched_clock() for example, where a 64-bit
 * non-wrapping monotonic value is expected to be returned.
 *
 * To overcome that limitation, let's extend a 32-bit counter to 63 bits
 * in a completely lock free fashion. Bits 0 to 31 of the clock are provided
 * by the hardware while bits 32 to 62 are stored in memory.  The top bit in
 * memory is used to synchronize with the hardware clock half-period.  When
 * the top bit of both counters (hardware and in memory) differ then the
 * memory is updated with a new value, incrementing it when the hardware
 * counter wraps around.
 *
 * Because a word store in memory is atomic then the incremented value will
 * always be in sync with the top bit indicating to any potential concurrent
 * reader if the value in memory is up to date or not with regards to the
 * needed increment.  And any race in updating the value in memory is harmless
 * as the same value would simply be stored more than once.
 *
 * The restrictions for the algorithm to work properly are:
 *
 * 1) this code must be called at least once per each half period of the
 *    32-bit counter;
 *
 * 2) this code must not be preempted for a duration longer than the
 *    32-bit counter half period minus the longest period between two
 *    calls to this code;
 *
 * Those requirements ensure proper update to the state bit in memory.
 * This is usually not a problem in practice, but if it is then a kernel
 * timer should be scheduled to manage for this code to be executed often
 * enough.
 *
 * And finally:
 *
 * 3) the cnt_lo argument must be seen as a globally incrementing value,
 *    meaning that it should be a direct reference to the counter data which
 *    can be evaluated according to a specific ordering within the macro,
 *    and not the result of a previous evaluation stored in a variable.
 *
 * For example, this is wrong:
 *
 *	u32 partial = get_hw_count();
 *	u64 full = cnt32_to_63(partial);
 *	return full;
 *
 * This is fine:
 *
 *	u64 full = cnt32_to_63(get_hw_count());
 *	return full;
 *
 * Note that the top bit (bit 63) in the returned value should be considered
 * as garbage.  It is not cleared here because callers are likely to use a
 * multiplier on the returned value which can get rid of the top bit
 * implicitly by making the multiplier even, therefore saving on a runtime
 * clear-bit instruction. Otherwise caller must remember to clear the top
 * bit explicitly.
 */
#define cnt32_to_63(cnt_lo) \
({ \
	/* one extension word per call site (see restriction 3 above) */ \
	static u32 __m_cnt_hi; \
	union cnt32_to_63 __x; \
	__x.hi = __m_cnt_hi; \
	/* read the stored high word strictly before sampling the counter */ \
	smp_rmb(); \
	__x.lo = (cnt_lo); \
	/* top bits differ: a half period was crossed since the last update */ \
	if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
		/* toggle the state bit; the (__x.hi >> 31) carry increments \
		 * bits 32-62 only when the hardware counter wrapped */ \
		__m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
	__x.val; \
})
    103
    104#endif