cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sfp-util.h (2461B)


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * These are copied from glibc/stdlib/longlong.h
 */

/* Two-word addition: (sh, sl) = (ah, al) + (bh, bl), propagating the carry
 * from the low words into the high word. */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do {                                                                  \
    UWtype __x;                                                         \
    __x = (al) + (bl);                                                  \
    (sh) = (ah) + (bh) + (__x < (al));                                  \
    (sl) = __x;                                                         \
  } while (0)

/* Two-word subtraction: (sh, sl) = (ah, al) - (bh, bl), propagating the
 * borrow from the low words into the high word. */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do {                                                                  \
    UWtype __x;                                                         \
    __x = (al) - (bl);                                                  \
    (sh) = (ah) - (bh) - (__x > (al));                                  \
    (sl) = __x;                                                         \
  } while (0)

/* Full-width multiply: (w1, w0) = u * v.  Uses the SuperH dmulu.l
 * instruction, which leaves the 64-bit product in the MACH:MACL pair. */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("dmulu.l %2,%3\n\tsts    macl,%1\n\tsts  mach,%0"	\
	: "=r" ((u32)(w1)), "=r" ((u32)(w0))	\
	:  "r" ((u32)(u)),   "r" ((u32)(v))	\
	: "macl", "mach")

/* Helpers for splitting a word into its high and low halves. */
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))

/* Two-word by one-word division: divides (n1, n0) by d, storing the quotient
 * in q and the remainder in r, computed half-word by half-word.  n1 must be
 * less than d so that the quotient fits in a single word. */
#define udiv_qrnnd(q, r, n1, n0, d) \
  do {									\
    UWtype __d1, __d0, __q1, __q0;					\
    UWtype __r1, __r0, __m;						\
    __d1 = __ll_highpart (d);						\
    __d0 = __ll_lowpart (d);						\
									\
    __r1 = (n1) % __d1;							\
    __q1 = (n1) / __d1;							\
    __m = (UWtype) __q1 * __d0;						\
    __r1 = __r1 * __ll_B | __ll_highpart (n0);				\
    if (__r1 < __m)							\
      {									\
	__q1--, __r1 += (d);						\
	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
	  if (__r1 < __m)						\
	    __q1--, __r1 += (d);					\
      }									\
    __r1 -= __m;							\
									\
    __r0 = __r1 % __d1;							\
    __q0 = __r1 / __d1;							\
    __m = (UWtype) __q0 * __d0;						\
    __r0 = __r0 * __ll_B | __ll_lowpart (n0);				\
    if (__r0 < __m)							\
      {									\
	__q0--, __r0 += (d);						\
	if (__r0 >= (d))						\
	  if (__r0 < __m)						\
	    __q0--, __r0 += (d);					\
      }									\
    __r0 -= __m;							\
									\
    (q) = (UWtype) __q1 * __ll_B | __q0;				\
    (r) = __r0;								\
  } while (0)

/* soft-fp invokes abort() on cases it cannot handle; map that to a plain
 * bail-out of the emulation function. */
#define abort()	return 0

#define __BYTE_ORDER __LITTLE_ENDIAN

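This header only defines macros; the surrounding math-emu/soft-fp code is
expected to supply UWtype, W_TYPE_SIZE and u32. Below is a minimal host-side
sketch (not part of the kernel tree) of how the portable macros behave,
assuming the file above is saved as sfp-util.h next to the test program. The
test values are illustrative, and umul_ppmm is left out because its inline
assembly only builds for SuperH.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Minimal stand-ins for what the kernel's math-emu code normally provides. */
typedef uint32_t u32;
typedef u32 UWtype;
#define W_TYPE_SIZE 32

#include "sfp-util.h"

int main(void)
{
	UWtype sh, sl, q, r;

	/* (0x1, 0xFFFFFFFF) + (0x0, 0x1): the carry out of the low word
	 * propagates, giving (0x00000002, 0x00000000). */
	add_ssaaaa(sh, sl, 0x1, 0xFFFFFFFFu, 0x0, 0x1);
	printf("sum  = %08" PRIx32 ":%08" PRIx32 "\n", sh, sl);

	/* Divide the two-word value (0x1234, 0x5678ABCD) by 0x80000001
	 * (top bit set, i.e. a normalized divisor); expect quotient
	 * 0x00002468 and remainder 0x56788765. */
	udiv_qrnnd(q, r, 0x1234u, 0x5678ABCDu, 0x80000001u);
	printf("quot = %08" PRIx32 " rem = %08" PRIx32 "\n", q, r);

	return 0;
}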