cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

sm3.c (8015B)


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described
 * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
 *
 * Copyright (C) 2017 ARM Limited or its affiliates.
 * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com>
 * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

#include <linux/module.h>
#include <asm/unaligned.h>
#include <crypto/sm3.h>

static const u32 ____cacheline_aligned K[64] = {
	0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb,
	0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc,
	0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce,
	0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6,
	0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
	0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
	0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
	0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5,
	0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53,
	0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d,
	0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4,
	0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43,
	0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
	0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
	0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
	0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
};
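
The K[] table above holds the 64 round constants of GM/T 0004-2012 pre-rotated into position, K[j] = rol32(T_j, j % 32) with T_j = 0x79cc4519 for rounds 0-15 and 0x7a879d8a for rounds 16-63, so the round macro below can add K[j] directly instead of rotating T_j in every round. A minimal user-space sketch that regenerates these constants (the rotl32() helper and main() harness are illustration-only, not part of this file):

#include <stdint.h>
#include <stdio.h>

/* Illustration-only rotate; the kernel file uses rol32() from <linux/bitops.h>. */
static uint32_t rotl32(uint32_t x, unsigned int n)
{
	n &= 31;
	return n ? (x << n) | (x >> (32 - n)) : x;
}

int main(void)
{
	for (unsigned int j = 0; j < 64; j++) {
		uint32_t t = (j < 16) ? 0x79cc4519 : 0x7a879d8a;

		printf("0x%08x,%s", (unsigned int)rotl32(t, j % 32),
		       (j % 4 == 3) ? "\n" : " ");
	}
	return 0;
}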

/*
 * Transform the message X which consists of 16 32-bit-words. See
 * GM/T 0004-2012 for details.
 */
#define R(i, a, b, c, d, e, f, g, h, t, w1, w2)			\
	do {							\
		ss1 = rol32((rol32((a), 12) + (e) + (t)), 7);	\
		ss2 = ss1 ^ rol32((a), 12);			\
		d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2));	\
		h += GG ## i(e, f, g) + ss1 + (w1);		\
		b = rol32((b), 9);				\
		f = rol32((f), 19);				\
		h = P0((h));					\
	} while (0)
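
R is the in-place form of the GM/T 0004-2012 round function: d accumulates TT1, h accumulates TT2 and is then passed through P0, and the register shift (A = TT1, B = A, C = B <<< 9, D = C, and likewise for E..H) is realized by permuting the argument order at each call site in sm3_transform() below rather than by moving values. For comparison, a spec-literal single round as a user-space sketch (the sm3_round_spec(), ff(), gg(), p0() and rotl32() names are illustration-only):

#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned int n)
{
	n &= 31;
	return n ? (x << n) | (x >> (32 - n)) : x;
}

/* FF_j / GG_j boolean functions: j < 16 uses XOR, later rounds use majority/choose. */
static uint32_t ff(int j, uint32_t x, uint32_t y, uint32_t z)
{
	return j < 16 ? x ^ y ^ z : (x & y) | (x & z) | (y & z);
}

static uint32_t gg(int j, uint32_t x, uint32_t y, uint32_t z)
{
	return j < 16 ? x ^ y ^ z : (x & y) | (~x & z);
}

static uint32_t p0(uint32_t x)
{
	return x ^ rotl32(x, 9) ^ rotl32(x, 17);
}

/* One round j over v[0..7] = A..H, with t = K[j] (already rotated),
 * w = W[j] and wp = W'[j] = W[j] ^ W[j+4]. */
static void sm3_round_spec(uint32_t v[8], int j, uint32_t t, uint32_t w, uint32_t wp)
{
	uint32_t ss1 = rotl32(rotl32(v[0], 12) + v[4] + t, 7);
	uint32_t ss2 = ss1 ^ rotl32(v[0], 12);
	uint32_t tt1 = ff(j, v[0], v[1], v[2]) + v[3] + ss2 + wp;
	uint32_t tt2 = gg(j, v[4], v[5], v[6]) + v[7] + ss1 + w;

	v[3] = v[2];
	v[2] = rotl32(v[1], 9);
	v[1] = v[0];
	v[0] = tt1;
	v[7] = v[6];
	v[6] = rotl32(v[5], 19);
	v[5] = v[4];
	v[4] = p0(tt2);
}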

#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \
	R(1, a, b, c, d, e, f, g, h, t, w1, w2)
#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \
	R(2, a, b, c, d, e, f, g, h, t, w1, w2)

#define FF1(x, y, z)  (x ^ y ^ z)
#define FF2(x, y, z)  ((x & y) | (x & z) | (y & z))

#define GG1(x, y, z)  FF1(x, y, z)
#define GG2(x, y, z)  ((x & y) | (~x & z))

/* Message expansion */
#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17))
#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23))
#define I(i)  (W[i] = get_unaligned_be32(data + i * 4))
#define W1(i) (W[i & 0x0f])
#define W2(i) (W[i & 0x0f] =				\
		P1(W[i & 0x0f]				\
			^ W[(i-9) & 0x0f]		\
			^ rol32(W[(i-3) & 0x0f], 15))	\
		^ rol32(W[(i-13) & 0x0f], 7)		\
		^ W[(i-6) & 0x0f])
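
I() loads a big-endian message word, while W1()/W2() keep only a 16-word sliding window of the expanded schedule: when W2(i) runs, the slot W[i & 0x0f] still holds W[i-16], which gets overwritten with W[i], and the (w1) ^ (w2) term in the round macro supplies W'[j] = W[j] ^ W[j+4] on the fly. For reference, a straightforward non-windowed expansion of the same schedule as a user-space sketch (sm3_expand(), load_be32(), p1() and rotl32() are illustration-only names):

#include <stdint.h>

static uint32_t rotl32(uint32_t x, unsigned int n)
{
	n &= 31;
	return n ? (x << n) | (x >> (32 - n)) : x;
}

static uint32_t p1(uint32_t x)
{
	return x ^ rotl32(x, 15) ^ rotl32(x, 23);
}

static uint32_t load_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

/* Expand one 64-byte block into W[0..67] and W'[0..63]. */
static void sm3_expand(const uint8_t block[64], uint32_t w[68], uint32_t wp[64])
{
	int j;

	for (j = 0; j < 16; j++)
		w[j] = load_be32(block + 4 * j);
	for (j = 16; j < 68; j++)
		w[j] = p1(w[j - 16] ^ w[j - 9] ^ rotl32(w[j - 3], 15)) ^
		       rotl32(w[j - 13], 7) ^ w[j - 6];
	for (j = 0; j < 64; j++)
		wp[j] = w[j] ^ w[j + 4];
}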

static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16])
{
	u32 a, b, c, d, e, f, g, h, ss1, ss2;

	a = sctx->state[0];
	b = sctx->state[1];
	c = sctx->state[2];
	d = sctx->state[3];
	e = sctx->state[4];
	f = sctx->state[5];
	g = sctx->state[6];
	h = sctx->state[7];

	R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4));
	R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5));
	R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6));
	R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7));
	R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8));
	R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9));
	R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10));
	R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11));
	R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12));
	R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13));
	R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14));
	R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15));
	R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16));
	R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17));
	R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18));
	R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19));

	R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20));
	R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21));
	R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22));
	R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23));
	R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24));
	R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25));
	R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26));
	R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27));
	R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28));
	R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29));
	R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30));
	R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31));
	R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32));
	R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33));
	R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34));
	R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35));

	R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36));
	R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37));
	R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38));
	R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39));
	R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40));
	R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41));
	R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42));
	R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43));
	R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44));
	R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45));
	R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46));
	R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47));
	R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48));
	R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49));
	R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50));
	R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51));

	R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52));
	R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53));
	R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54));
	R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55));
	R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56));
	R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57));
	R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58));
	R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59));
	R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60));
	R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61));
	R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62));
	R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63));
	R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64));
	R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65));
	R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66));
	R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67));

	sctx->state[0] ^= a;
	sctx->state[1] ^= b;
	sctx->state[2] ^= c;
	sctx->state[3] ^= d;
	sctx->state[4] ^= e;
	sctx->state[5] ^= f;
	sctx->state[6] ^= g;
	sctx->state[7] ^= h;
}
#undef R
#undef R1
#undef R2
#undef I
#undef W1
#undef W2

static inline void sm3_block(struct sm3_state *sctx,
		u8 const *data, int blocks, u32 W[16])
{
	while (blocks--) {
		sm3_transform(sctx, data, W);
		data += SM3_BLOCK_SIZE;
	}
}

void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len)
{
	unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
	u32 W[16];

	sctx->count += len;

	if ((partial + len) >= SM3_BLOCK_SIZE) {
		int blocks;

		if (partial) {
			int p = SM3_BLOCK_SIZE - partial;

			memcpy(sctx->buffer + partial, data, p);
			data += p;
			len -= p;

			sm3_block(sctx, sctx->buffer, 1, W);
		}

		blocks = len / SM3_BLOCK_SIZE;
		len %= SM3_BLOCK_SIZE;

		if (blocks) {
			sm3_block(sctx, data, blocks, W);
			data += blocks * SM3_BLOCK_SIZE;
		}

		memzero_explicit(W, sizeof(W));

		partial = 0;
	}
	if (len)
		memcpy(sctx->buffer + partial, data, len);
}
EXPORT_SYMBOL_GPL(sm3_update);

void sm3_final(struct sm3_state *sctx, u8 *out)
{
	const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64);
	__be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
	__be32 *digest = (__be32 *)out;
	unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
	u32 W[16];
	int i;

	sctx->buffer[partial++] = 0x80;
	if (partial > bit_offset) {
		memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial);
		partial = 0;

		sm3_block(sctx, sctx->buffer, 1, W);
	}

	memset(sctx->buffer + partial, 0, bit_offset - partial);
	*bits = cpu_to_be64(sctx->count << 3);
	sm3_block(sctx, sctx->buffer, 1, W);

	for (i = 0; i < 8; i++)
		put_unaligned_be32(sctx->state[i], digest++);

	/* Zeroize sensitive information. */
	memzero_explicit(W, sizeof(W));
	memzero_explicit(sctx, sizeof(*sctx));
}
EXPORT_SYMBOL_GPL(sm3_final);
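
Together with sm3_init() from <crypto/sm3.h>, which in mainline kernels initializes the eight-word IV and the byte count, sm3_update() and sm3_final() form the whole library interface; note that sm3_final() wipes the context, so a state must be re-initialized before reuse. A minimal in-kernel usage sketch under that assumption (the sm3_selftest() name is illustration-only); the expected output is the published SM3("abc") test vector:

#include <linux/errno.h>
#include <linux/string.h>
#include <crypto/sm3.h>

/* Illustration-only: hash "abc" with the library API and compare the result
 * against the published SM3("abc") test vector. */
static int sm3_selftest(void)
{
	static const u8 msg[] = "abc";
	static const u8 expected[SM3_DIGEST_SIZE] = {
		0x66, 0xc7, 0xf0, 0xf4, 0x62, 0xee, 0xed, 0xd9,
		0xd1, 0xf2, 0xd4, 0x6b, 0xdc, 0x10, 0xe4, 0xe2,
		0x41, 0x67, 0xc4, 0x87, 0x5c, 0xf2, 0xf7, 0xa2,
		0x22, 0x97, 0xda, 0x02, 0xb8, 0xf4, 0xba, 0x8e,
	};
	struct sm3_state sctx;
	u8 digest[SM3_DIGEST_SIZE];

	sm3_init(&sctx);
	sm3_update(&sctx, msg, 3);
	sm3_final(&sctx, digest);	/* also zeroizes sctx */

	return memcmp(digest, expected, SM3_DIGEST_SIZE) ? -EINVAL : 0;
}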

MODULE_DESCRIPTION("Generic SM3 library");
MODULE_LICENSE("GPL v2");