cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmac.c (19341B)


/*
 * VMAC: Message Authentication Code using Universal Hashing
 *
 * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
 *
 * Copyright (c) 2009, Intel Corporation.
 * Copyright (c) 2018, Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

/*
 * Derived from:
 *	VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
 *	This implementation is hereby placed in the public domain.
 *	The authors offer no warranty. Use at your own risk.
 *	Last modified: 17 APR 08, 1700 PDT
 */

#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>

/*
 * User definable settings.
 */
#define VMAC_TAG_LEN	64
#define VMAC_KEY_SIZE	128	/* Must be 128, 192 or 256 */
#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
#define VMAC_NHBYTES	128	/* Must be 2^i for some 3 < i < 13; standard = 128 */
#define VMAC_NONCEBYTES	16

/* per-transform (per-key) context */
struct vmac_tfm_ctx {
	struct crypto_cipher *cipher;
	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
	u64 polykey[2*VMAC_TAG_LEN/64];
	u64 l3key[2*VMAC_TAG_LEN/64];
};

/* per-request context */
struct vmac_desc_ctx {
	union {
		u8 partial[VMAC_NHBYTES];	/* partial block */
		__le64 partial_words[VMAC_NHBYTES / 8];
	};
	unsigned int partial_size;	/* size of the partial block */
	bool first_block_processed;
	u64 polytmp[2*VMAC_TAG_LEN/64];	/* running total of L2-hash */
	union {
		u8 bytes[VMAC_NONCEBYTES];
		__be64 pads[VMAC_NONCEBYTES / 8];
	} nonce;
	unsigned int nonce_size; /* nonce bytes filled so far */
};

/*
 * Constants and masks
 */
#define UINT64_C(x) x##ULL
static const u64 p64   = UINT64_C(0xfffffffffffffeff);	/* 2^64 - 257 prime  */
static const u64 m62   = UINT64_C(0x3fffffffffffffff);	/* 62-bit mask       */
static const u64 m63   = UINT64_C(0x7fffffffffffffff);	/* 63-bit mask       */
static const u64 m64   = UINT64_C(0xffffffffffffffff);	/* 64-bit mask       */
static const u64 mpoly = UINT64_C(0x1fffffff1fffffff);	/* Poly key mask     */

#define pe64_to_cpup le64_to_cpup		/* Prefer little endian */

#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif

/*
 * The following routines are used in this implementation. They are
 * written via macros to simulate zero-overhead call-by-reference.
 *
 * MUL64: 64x64->128-bit multiplication
 * PMUL64: assumes top bits cleared on inputs
 * ADD128: 128x128->128-bit addition
 */

#define ADD128(rh, rl, ih, il)						\
	do {								\
		u64 _il = (il);						\
		(rl) += (_il);						\
		if ((rl) < (_il))					\
			(rh)++;						\
		(rh) += (ih);						\
	} while (0)

#define MUL32(i1, i2)	((u64)(u32)(i1)*(u32)(i2))

#define PMUL64(rh, rl, i1, i2)	/* Assumes m doesn't overflow */	\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2);	\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m >> 32), (m << 32));			\
	} while (0)

#define MUL64(rh, rl, i1, i2)						\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m1 = MUL32(_i1, _i2>>32);				\
		u64 m2 = MUL32(_i1>>32, _i2);				\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m1 >> 32), (m1 << 32));			\
		ADD128(rh, rl, (m2 >> 32), (m2 << 32));			\
	} while (0)
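
/*
 * A worked view of the decomposition above: with i1 = a*2^32 + b and
 * i2 = c*2^32 + d, i1*i2 = ac*2^64 + (ad + bc)*2^32 + bd.  MUL64 seeds
 * rh:rl with ac:bd and shifts the two cross products in via ADD128;
 * PMUL64 first merges them into a single m = ad + bc, which is only safe
 * when the inputs' top bits are clear (as for the mpoly-masked poly keys),
 * since otherwise m itself could overflow 64 bits.
 */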

/*
 * For highest performance the L1 NH and L2 polynomial hashes should be
 * carefully implemented to take advantage of one's target architecture.
 * Here these two hash functions are defined multiple times: once for
 * 64-bit architectures, once for 32-bit SSE2 architectures, and once
 * for the remaining (32-bit) architectures.
 * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
 * Optionally, nh_vmac_nhbytes can be defined (for multiples of
 * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
 * NH computations at once).
 */
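
/*
 * For reference, the NH hash these nh_* routines compute is
 *	NH(M) = sum over even i of (M[i] + K[i]) * (M[i+1] + K[i+1]) mod 2^128,
 * where M[i] and K[i] are 64-bit words (message words read little-endian)
 * and the 128-bit accumulation happens in the (rh, rl) pair.
 */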

#ifdef CONFIG_64BIT

#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1)				\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)

#if (VMAC_NHBYTES >= 64) /* These versions do 64 bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1)			\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+5]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+7]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+9]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)
#endif

#define poly_step(ah, al, kh, kl, mh, ml)				\
	do {								\
		u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0;		\
		/* compute ab*cd, put bd into result registers */	\
		PMUL64(t3h, t3l, al, kh);				\
		PMUL64(t2h, t2l, ah, kl);				\
		PMUL64(t1h, t1l, ah, 2*kh);				\
		PMUL64(ah, al, al, kl);					\
		/* add 2 * ac to result */				\
		ADD128(ah, al, t1h, t1l);				\
		/* add together ad + bc */				\
		ADD128(t2h, t2l, t3h, t3l);				\
		/* now (ah,al), (t2l,2*t2h) need summing */		\
		/* first add the high registers, carrying into t2h */	\
		ADD128(t2h, ah, z, t2l);				\
		/* double t2h and add top bit of ah */			\
		t2h = 2 * t2h + (ah >> 63);				\
		ah &= m63;						\
		/* now add the low registers */				\
		ADD128(ah, al, mh, ml);					\
		ADD128(ah, al, z, t2h);					\
	} while (0)
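
/*
 * poly_step() performs one Horner step of the L2 hash:
 * (ah,al) = (ah,al) * (kh,kl) + (mh,ml) (mod 2^127 - 1), leaving the
 * result only partially reduced; l3hash() does the full reduction mod p127.
 */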

#else /* ! CONFIG_64BIT */

#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		u64 t1, t2, m1, m2, t;					\
		int i;							\
		rh = rl = t = 0;					\
		for (i = 0; i < nw; i += 2) {				\
			t1 = pe64_to_cpup(mp+i) + kp[i];		\
			t2 = pe64_to_cpup(mp+i+1) + kp[i+1];		\
			m2 = MUL32(t1 >> 32, t2);			\
			m1 = MUL32(t1, t2 >> 32);			\
			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),	\
				MUL32(t1, t2));				\
			rh += (u64)(u32)(m1 >> 32)			\
				+ (u32)(m2 >> 32);			\
			t += (u64)(u32)m1 + (u32)m2;			\
		}							\
		ADD128(rh, rl, (t >> 32), (t << 32));			\
	} while (0)
#endif

static void poly_step_func(u64 *ahi, u64 *alo,
			const u64 *kh, const u64 *kl,
			const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))

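	/*
	 * Same math as the 64-bit poly_step: multiply the 127-bit accumulator
	 * (a3:a2:a1:a0) by the key (k3:k2:k1:k0) in 32-bit limbs and add the
	 * message word, folding product limbs of weight >= 2^128 back in with
	 * an extra factor of 2, since 2^128 == 2 (mod 2^127 - 1).
	 */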
	u64 p, q, t;
	u32 t2;

	p = MUL32(a3, k3);
	p += p;
	p += *(u64 *)mh;
	p += MUL32(a0, k2);
	p += MUL32(a1, k1);
	p += MUL32(a2, k0);
	t = (u32)(p);
	p >>= 32;
	p += MUL32(a0, k3);
	p += MUL32(a1, k2);
	p += MUL32(a2, k1);
	p += MUL32(a3, k0);
	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
	p >>= 31;
	p += (u64)(((u32 *)ml)[INDEX_LOW]);
	p += MUL32(a0, k0);
	q =  MUL32(a1, k3);
	q += MUL32(a2, k2);
	q += MUL32(a3, k1);
	q += q;
	p += q;
	t2 = (u32)(p);
	p >>= 32;
	p += (u64)(((u32 *)ml)[INDEX_HIGH]);
	p += MUL32(a0, k1);
	p += MUL32(a1, k0);
	q =  MUL32(a2, k3);
	q += MUL32(a3, k2);
	q += q;
	p += q;
	*(u64 *)(alo) = (p << 32) | t2;
	p >>= 32;
	*(u64 *)(ahi) = p + t;

#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}

#define poly_step(ah, al, kh, kl, mh, ml)				\
	poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))

#endif  /* end of specialized NH and poly definitions */

/* At least nh_16 is defined. Define the others as needed here. */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2)				\
	do {								\
		nh_16(mp, kp, nw, rh, rl);				\
		nh_16(mp, ((kp)+2), nw, rh2, rl2);			\
	} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2)			\
	do {								\
		nh_vmac_nhbytes(mp, kp, nw, rh, rl);			\
		nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2);		\
	} while (0)
#endif

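/*
 * L3-hash: collapse the 128-bit L2 output to a 64-bit result.  The input
 * (plus the bit length) is fully reduced mod 2^127 - 1, split by division
 * against 2^64 - 2^32, and the two parts are combined as
 * (p1+k1)*(p2+k2) mod p64 using the two L3 key words.
 */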
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
	u64 rh, rl, t, z = 0;

	/* fully reduce (p1,p2)+(len,0) mod p127 */
	t = p1 >> 63;
	p1 &= m63;
	ADD128(p1, p2, len, t);
	/* At this point, (p1,p2) is at most 2^127+(len<<64) */
	t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
	ADD128(p1, p2, z, t);
	p1 &= m63;

	/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
	t = p1 + (p2 >> 32);
	t += (t >> 32);
	t += (u32)t > 0xfffffffeu;
	p1 += (t >> 32);
	p2 += (p1 << 32);

	/* compute (p1+k1)%p64 and (p2+k2)%p64 */
	p1 += k1;
	p1 += (0 - (p1 < k1)) & 257;
	p2 += k2;
	p2 += (0 - (p2 < k2)) & 257;

	/* compute (p1+k1)*(p2+k2)%p64 */
	MUL64(rh, rl, p1, p2);
	t = rh >> 56;
	ADD128(t, rl, z, rh);
	rh <<= 8;
	ADD128(t, rl, z, rh);
	t += t << 8;
	rl += t;
	rl += (0 - (rl < t)) & 257;
	rl += (0 - (rl > p64-1)) & 257;
	return rl;
}

/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
			 struct vmac_desc_ctx *dctx,
			 const __le64 *mptr, unsigned int blocks)
{
	const u64 *kptr = tctx->nhkey;
	const u64 pkh = tctx->polykey[0];
	const u64 pkl = tctx->polykey[1];
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];
	u64 rh, rl;

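	/*
	 * The first block is ADD128-ed rather than poly_step-ed: polytmp
	 * starts out holding the L2 key itself (see vmac_init()), so this
	 * folds block 1 into the initial accumulator, and each later block
	 * then costs one Horner step.
	 */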
	if (!dctx->first_block_processed) {
		dctx->first_block_processed = true;
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		ADD128(ch, cl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		blocks--;
	}

	while (blocks--) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
	}

	dctx->polytmp[0] = ch;
	dctx->polytmp[1] = cl;
}

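/*
 * Derive the NH, polynomial and L3 subkeys by encrypting counter blocks
 * whose first byte tags the subkey type (0x80, 0xC0 and 0xE0 below).
 * The L3 key words are regenerated until both are below p64, so that
 * they are uniformly distributed modulo p64.
 */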
static int vmac_setkey(struct crypto_shash *tfm,
		       const u8 *key, unsigned int keylen)
{
	struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
	__be64 out[2];
	u8 in[16] = { 0 };
	unsigned int i;
	int err;

	if (keylen != VMAC_KEY_LEN)
		return -EINVAL;

	err = crypto_cipher_setkey(tctx->cipher, key, keylen);
	if (err)
		return err;

	/* Fill nh key */
	in[0] = 0x80;
	for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->nhkey[i] = be64_to_cpu(out[0]);
		tctx->nhkey[i+1] = be64_to_cpu(out[1]);
		in[15]++;
	}

	/* Fill poly key */
	in[0] = 0xC0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
		tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
		in[15]++;
	}

	/* Fill ip key */
	in[0] = 0xE0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
		do {
			crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
			tctx->l3key[i] = be64_to_cpu(out[0]);
			tctx->l3key[i+1] = be64_to_cpu(out[1]);
			in[15]++;
		} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
	}

	return 0;
}

static int vmac_init(struct shash_desc *desc)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);

	dctx->partial_size = 0;
	dctx->first_block_processed = false;
	memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
	dctx->nonce_size = 0;
	return 0;
}

static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	unsigned int n;

	/* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
	if (dctx->nonce_size < VMAC_NONCEBYTES) {
		n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
		memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
		dctx->nonce_size += n;
		p += n;
		len -= n;
	}

	if (dctx->partial_size) {
		n = min(len, VMAC_NHBYTES - dctx->partial_size);
		memcpy(&dctx->partial[dctx->partial_size], p, n);
		dctx->partial_size += n;
		p += n;
		len -= n;
		if (dctx->partial_size == VMAC_NHBYTES) {
			vhash_blocks(tctx, dctx, dctx->partial_words, 1);
			dctx->partial_size = 0;
		}
	}

	if (len >= VMAC_NHBYTES) {
		n = round_down(len, VMAC_NHBYTES);
		/* TODO: 'p' may be misaligned here */
		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
		p += n;
		len -= n;
	}

	if (len) {
		memcpy(dctx->partial, p, len);
		dctx->partial_size = len;
	}

	return 0;
}

static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
		       struct vmac_desc_ctx *dctx)
{
	unsigned int partial = dctx->partial_size;
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];

	/* L1 and L2-hash the final block if needed */
	if (partial) {
		/* Zero-pad to next 128-bit boundary */
		unsigned int n = round_up(partial, 16);
		u64 rh, rl;

		memset(&dctx->partial[partial], 0, n - partial);
		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
		rh &= m62;
		if (dctx->first_block_processed)
			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
				  rh, rl);
		else
			ADD128(ch, cl, rh, rl);
	}

	/* L3-hash the 128-bit output of L2-hash */
	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}

static int vmac_final(struct shash_desc *desc, u8 *out)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	int index;
	u64 hash, pad;

	if (dctx->nonce_size != VMAC_NONCEBYTES)
		return -EINVAL;

	/*
	 * The VMAC specification requires a nonce at least 1 bit shorter than
	 * the block cipher's block length, so we actually only accept a 127-bit
	 * nonce.  We define the unused bit to be the first one and require that
	 * it be 0, so the needed prepending of a 0 bit is implicit.
	 */
	if (dctx->nonce.bytes[0] & 0x80)
		return -EINVAL;

	/* Finish calculating the VHASH of the message */
	hash = vhash_final(tctx, dctx);

	/* Generate pseudorandom pad by encrypting the nonce */
	BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
	index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
	dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
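	/*
	 * The nonce's last bit only selects which 64-bit half of the
	 * ciphertext becomes the pad; it is cleared before encryption, so
	 * two nonces differing only in that bit share one block cipher
	 * invocation and use opposite halves of its output.
	 */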
	crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
				  dctx->nonce.bytes);
	pad = be64_to_cpu(dctx->nonce.pads[index]);

	/* The VMAC is the sum of VHASH and the pseudorandom pad */
	put_unaligned_be64(hash + pad, out);
	return 0;
}

static int vmac_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tctx->cipher = cipher;
	return 0;
}

static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->cipher);
}

static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct shash_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *alg;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = shash_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_cipher_alg(spawn);

	err = -EINVAL;
	if (alg->cra_blocksize != VMAC_NONCEBYTES)
		goto err_free_inst;

	err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->cra_priority;
	inst->alg.base.cra_blocksize = alg->cra_blocksize;
	inst->alg.base.cra_alignmask = alg->cra_alignmask;

	inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
	inst->alg.base.cra_init = vmac_init_tfm;
	inst->alg.base.cra_exit = vmac_exit_tfm;

	inst->alg.descsize = sizeof(struct vmac_desc_ctx);
	inst->alg.digestsize = VMAC_TAG_LEN / 8;
	inst->alg.init = vmac_init;
	inst->alg.update = vmac_update;
	inst->alg.final = vmac_final;
	inst->alg.setkey = vmac_setkey;

	inst->free = shash_free_singlespawn_instance;

	err = shash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		shash_free_singlespawn_instance(inst);
	}
	return err;
}

static struct crypto_template vmac64_tmpl = {
	.name = "vmac64",
	.create = vmac_create,
	.module = THIS_MODULE,
};

static int __init vmac_module_init(void)
{
	return crypto_register_template(&vmac64_tmpl);
}

static void __exit vmac_module_exit(void)
{
	crypto_unregister_template(&vmac64_tmpl);
}

subsys_initcall(vmac_module_init);
module_exit(vmac_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac64");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
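
Usage note: below is a minimal sketch (not part of vmac.c) of computing a
vmac64(aes) tag through the kernel shash API. The buffer names and the
abbreviated error handling are illustrative assumptions; the one real
constraint, per vmac_update() above, is that the 16-byte nonce must be
passed as the first 16 bytes of data.

#include <crypto/hash.h>

static int vmac64_example(const u8 key[16], const u8 nonce[16],
			  const u8 *msg, unsigned int msglen, u8 tag[8])
{
	struct crypto_shash *tfm;
	int err;

	/* "vmac64" must wrap a cipher with 16-byte blocks, e.g. AES */
	tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, 16);	/* VMAC_KEY_LEN */
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* nonce goes in as the first VMAC_NONCEBYTES of data */
		err = crypto_shash_init(desc) ?:
		      crypto_shash_update(desc, nonce, 16) ?:
		      crypto_shash_update(desc, msg, msglen) ?:
		      crypto_shash_final(desc, tag);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}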