cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ima_crypto.c (20896B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"

/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");
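/*
 * Example (hypothetical value): booting with "ima.ahash_minsize=65536"
 * routes files of 64 KiB and larger through the ahash path below, while
 * smaller files keep using the synchronous shash path.
 */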

/* default is 0 - 1 page. */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;

static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}

static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
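/*
 * Example (hypothetical value): "ima.ahash_bufsize=32K" is parsed by
 * memparse(), then rounded up to a power-of-two page order by get_order()
 * (order 3 with 4 KiB pages), so ima_bufsize becomes 32768.
 */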

static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

struct ima_algo_desc {
	struct crypto_shash *tfm;
	enum hash_algo algo;
};

int ima_sha1_idx __ro_after_init;
int ima_hash_algo_idx __ro_after_init;
/*
 * Additional number of slots reserved, as needed, for SHA1
 * and IMA default algo.
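 *
 * Worked example (hypothetical configuration): a TPM exposing only a
 * SHA256 bank while ima_hash_algo is HASH_ALGO_SHA512 yields
 * slot 0 = SHA256 (TPM bank), slot 1 = SHA1 (extra slot) and
 * slot 2 = SHA512 (extra slot), i.e. ima_extra_slots == 2.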
 */
int ima_extra_slots __ro_after_init;

static struct ima_algo_desc *ima_algo_array;

static int __init ima_init_ima_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	pr_info("Allocated hash algorithm: %s\n",
		hash_algo_name[ima_hash_algo]);
	return 0;
}

static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc, i;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo == ima_hash_algo)
		return tfm;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
		if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
			return ima_algo_array[i].tfm;

	tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
	if (IS_ERR(tfm)) {
		rc = PTR_ERR(tfm);
		pr_err("Can not allocate %s (reason: %d)\n",
		       hash_algo_name[algo], rc);
	}
	return tfm;
}

int __init ima_init_crypto(void)
{
	enum hash_algo algo;
	long rc;
	int i;

	rc = ima_init_ima_crypto();
	if (rc)
		return rc;

	ima_sha1_idx = -1;
	ima_hash_algo_idx = -1;

	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (algo == HASH_ALGO_SHA1)
			ima_sha1_idx = i;

		if (algo == ima_hash_algo)
			ima_hash_algo_idx = i;
	}

	if (ima_sha1_idx < 0) {
		ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
		if (ima_hash_algo == HASH_ALGO_SHA1)
			ima_hash_algo_idx = ima_sha1_idx;
	}

	if (ima_hash_algo_idx < 0)
		ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;

	ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
				 sizeof(*ima_algo_array), GFP_KERNEL);
	if (!ima_algo_array) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
		ima_algo_array[i].algo = algo;

		/* unknown TPM algorithm */
		if (algo == HASH_ALGO__LAST)
			continue;

		if (algo == ima_hash_algo) {
			ima_algo_array[i].tfm = ima_shash_tfm;
			continue;
		}

		ima_algo_array[i].tfm = ima_alloc_tfm(algo);
		if (IS_ERR(ima_algo_array[i].tfm)) {
			if (algo == HASH_ALGO_SHA1) {
				rc = PTR_ERR(ima_algo_array[i].tfm);
				ima_algo_array[i].tfm = NULL;
				goto out_array;
			}

			ima_algo_array[i].tfm = NULL;
		}
	}

	if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) {
		if (ima_hash_algo == HASH_ALGO_SHA1) {
			ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm;
		} else {
			ima_algo_array[ima_sha1_idx].tfm =
						ima_alloc_tfm(HASH_ALGO_SHA1);
			if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) {
				rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm);
				goto out_array;
			}
		}

		ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1;
	}

	if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) &&
	    ima_hash_algo_idx != ima_sha1_idx) {
		ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm;
		ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo;
	}

	return 0;
out_array:
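	/* Free the per-slot tfms; ima_shash_tfm itself is freed at "out". */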
	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
		if (!ima_algo_array[i].tfm ||
		    ima_algo_array[i].tfm == ima_shash_tfm)
			continue;

		crypto_free_shash(ima_algo_array[i].tfm);
	}
out:
	crypto_free_shash(ima_shash_tfm);
	return rc;
}

static void ima_free_tfm(struct crypto_shash *tfm)
{
	int i;

	if (tfm == ima_shash_tfm)
		return;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
		if (ima_algo_array[i].tfm == tfm)
			return;

	crypto_free_shash(tfm);
}

/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size:       Maximum amount of memory to allocate.
 * @allocated_size: Returned size of actual allocation.
 * @last_warn:      Whether the final zero-order allocation may warn.
 *
 * Tries opportunistic allocation: first attempt to allocate max_size bytes,
 * then retry with progressively smaller orders until order zero is reached.
 * Allocations are attempted without generating allocation warnings unless
 * last_warn is set; last_warn only affects the final, zero-order allocation.
 *
 * By default, ima_maxorder is 0 and this is equivalent to kmalloc(GFP_KERNEL).
 *
 * Return: pointer to allocated memory, or NULL on failure.
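 *
 * Example (hypothetical sizes): with ahash_bufsize=64K (ima_maxorder == 4
 * on 4 KiB pages) and a max_size of 100 KiB, the attempts are order 4, 3,
 * 2, then 1 without warnings, and finally a single order-0 page with
 * GFP_KERNEL.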
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - one page */

	gfp_mask = GFP_KERNEL;

	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}

/**
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr:  Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}

static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}

static inline int ahash_wait(int err, struct crypto_wait *wait)
{
	err = crypto_wait_req(err, wait);

	if (err)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

	return err;
}

static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct crypto_wait wait;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate a secondary buffer. If that fails, fall
		 * back to single buffering. Use the previous allocation size
		 * as baseline for the possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

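	/*
	 * With two buffers the loop below pipelines I/O and hashing: while
	 * the ahash engine works on rbuf[active], the next chunk is read
	 * into the other buffer, and "active" flips after each update.
	 */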
	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len) {
			if (rc >= 0)
				rc = -EINVAL;
			/*
			 * Forward current rc, do not overwrite with return value
			 * from ahash_wait()
			 */
			ahash_wait(ahash_rc, &wait);
			goto out3;
		}

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
out3:
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out1:
	ahash_request_free(req);
	return rc;
}

static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0) {	/* unexpected EOF */
			rc = -EINVAL;
			break;
		}
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

/*
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation.  If ahash fails, it falls back to using
 * shash.
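 *
 * Example (hypothetical value): with ima.ahash_minsize=1M, a 4 MiB file is
 * first handed to ima_calc_file_ahash() and, only on failure, to
 * ima_calc_file_shash(); a 4 KiB file goes straight to shash.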
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;
	struct file *f = file;
	bool new_file_instance = false;

	/*
	 * For consistency, fail files opened with the O_DIRECT flag on
	 * filesystems mounted with or without the DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	/* Open a new file instance in O_RDONLY if we cannot read */
	if (!(file->f_mode & FMODE_READ)) {
		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
				O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
		flags |= O_RDONLY;
		f = dentry_open(&file->f_path, flags, file->f_cred);
		if (IS_ERR(f))
			return PTR_ERR(f);

		new_file_instance = true;
	}

	i_size = i_size_read(file_inode(f));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(f, hash);
		if (!rc)
			goto out;
	}

	rc = ima_calc_file_shash(f, hash);
out:
	if (new_file_instance)
		fput(f);
	return rc;
}

/*
 * Calculate the hash of template data
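 *
 * For every template other than the original "ima" template, each field's
 * length is hashed before the field data; with ima_canonical_fmt the length
 * is hashed in little-endian form so measurement lists can be verified
 * across endianness. For the "ima" template, the event name field ("n") is
 * instead padded to a fixed IMA_EVENT_NAME_LEN_MAX + 1 bytes.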
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_entry *entry,
					 int tfm_idx)
{
	SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
	struct ima_template_desc *td = entry->template_desc;
	int num_fields = entry->template_desc->num_fields;
	int rc, i;

	shash->tfm = ima_algo_array[tfm_idx].tfm;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash = !ima_canonical_fmt ?
				datalen : (__force u32)cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);

	return rc;
}

int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_entry *entry)
{
	u16 alg_id;
	int rc, i;

	rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
	if (rc)
		return rc;

	entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
		if (i == ima_sha1_idx)
			continue;

		if (i < NR_BANKS(ima_tpm_chip)) {
			alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
			entry->digests[i].alg_id = alg_id;
		}

		/* for unmapped TPM algorithms digest is still a padded SHA1 */
		if (!ima_algo_array[i].tfm) {
			memcpy(entry->digests[i].digest,
			       entry->digests[ima_sha1_idx].digest,
			       TPM_DIGEST_SIZE);
			continue;
		}

		rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
		if (rc)
			return rc;
	}
	return rc;
}

static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out:
	ahash_request_free(req);
	return rc;
}

static int calc_buffer_ahash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				struct ima_digest_data *hash,
				struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int calc_buffer_shash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

	ima_free_tfm(tfm);
	return rc;
}

int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	return calc_buffer_shash(buf, len, hash);
}

static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
	if (!ima_tpm_chip)
		return;

	if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
		pr_err("Error Communicating to TPM chip\n");
}

/*
 * The boot_aggregate is a cumulative hash over TPM registers 0 - 7.  With
 * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
 * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
 * allowing firmware to configure and enable different banks.
 *
 * Knowing which TPM bank is read to calculate the boot_aggregate digest
 * needs to be conveyed to a verifier.  For this reason, use the same
 * hash algorithm for reading the TPM PCRs as for calculating the boot
 * aggregate digest as stored in the measurement list.
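 *
 * The SHA1 boot_aggregate covers PCRs 0-7 only; for any other bank the
 * aggregate additionally covers PCRs 8-9 (kernel command line and kernel
 * image in a typical PCR allocation), as implemented below.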
 */
static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
				       struct crypto_shash *tfm)
{
	struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
	int rc;
	u32 i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
		 d.alg_id);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative digest over TPM registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, &d);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, d.digest,
					 crypto_shash_digestsize(tfm));
		if (rc != 0)
			return rc;
	}
	/*
	 * Extend cumulative digest over TPM registers 8-9, which contain
	 * measurement for the kernel command line (reg. 8) and image (reg. 9)
	 * in a typical PCR allocation. Registers 8-9 are only included in
	 * non-SHA1 boot_aggregate digests to avoid ambiguity.
	 */
	if (alg_id != TPM_ALG_SHA1) {
		for (i = TPM_PCR8; i < TPM_PCR10; i++) {
			ima_pcrread(i, &d);
			rc = crypto_shash_update(shash, d.digest,
						 crypto_shash_digestsize(tfm));
			if (rc != 0)
				return rc;
		}
	}
	if (!rc)
		rc = crypto_shash_final(shash, digest);
	return rc;
}

int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	u16 crypto_id, alg_id;
	int rc, i, bank_idx = -1;

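	/*
	 * Prefer the bank matching the requested algorithm; otherwise fall
	 * back to a SHA256 bank, and to a SHA1 bank only if nothing better
	 * was found.
	 */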
	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (crypto_id == hash->algo) {
			bank_idx = i;
			break;
		}

		if (crypto_id == HASH_ALGO_SHA256)
			bank_idx = i;

		if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
			bank_idx = i;
	}

	if (bank_idx == -1) {
		pr_err("No suitable TPM algorithm for boot aggregate\n");
		return 0;
	}

	hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
	rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);

	ima_free_tfm(tfm);

	return rc;
}