cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ima_queue.c (6569B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Serge Hallyn <serue@us.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * File: ima_queue.c
 *       Implements queues that store template measurements and
 *       maintains an aggregate over the stored measurements
 *       in the pre-configured TPM PCR (if available).
 *       The measurement list is append-only. No entry is
 *       ever removed or changed during the boot-cycle.
 */

#include <linux/rculist.h>
#include <linux/slab.h>
#include "ima.h"

#define AUDIT_CAUSE_LEN_MAX 32

/* pre-allocated array of tpm_digest structures to extend a PCR */
static struct tpm_digest *digests;

LIST_HEAD(ima_measurements);	/* list of all measurements */
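/*
 * Size needed to serialize the measurement list for kexec; ULONG_MAX
 * means carrying the measurement list across kexec is not supported.
 */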
#ifdef CONFIG_IMA_KEXEC
static unsigned long binary_runtime_size;
#else
static unsigned long binary_runtime_size = ULONG_MAX;
#endif

/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
	.len = ATOMIC_LONG_INIT(0),
	.violations = ATOMIC_LONG_INIT(0),
	.queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
};

/* mutex protects atomicity of extending the measurement list
 * and extending the TPM PCR aggregate. Since tpm_extend can take
 * a long time (and the tpm driver uses a mutex), we can't use a spinlock.
 */
static DEFINE_MUTEX(ima_extend_list_mutex);

/* look up the digest value in the hash table, and return the entry */
static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
						       int pcr)
{
	struct ima_queue_entry *qe, *ret = NULL;
	unsigned int key;
	int rc;

	key = ima_hash_key(digest_value);
	rcu_read_lock();
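	/* walk the collision chain; both the digest and the target PCR must match */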
	hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
		rc = memcmp(qe->entry->digests[ima_hash_algo_idx].digest,
			    digest_value, hash_digest_size[ima_hash_algo]);
		if ((rc == 0) && (qe->entry->pcr == pcr)) {
			ret = qe;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Calculate the memory required for serializing a single
 * binary_runtime_measurement list entry, which contains a
 * couple of variable length fields (e.g. template name and data).
 */
static int get_binary_runtime_size(struct ima_template_entry *entry)
{
	int size = 0;

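	/* one serialized record, matching the fields summed below:
	 * [u32 pcr][TPM digest][int name len][name][u32 data len][data]
	 */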
	size += sizeof(u32);	/* pcr */
	size += TPM_DIGEST_SIZE;
	size += sizeof(int);	/* template name size field */
	size += strlen(entry->template_desc->name);
	size += sizeof(entry->template_data_len);
	size += entry->template_data_len;
	return size;
}

/* ima_add_template_entry helper function:
 * - Add template entry to the measurement list and hash table, for
 *   all entries except those carried across kexec.
 *
 * (Called with ima_extend_list_mutex held.)
 */
static int ima_add_digest_entry(struct ima_template_entry *entry,
				bool update_htable)
{
	struct ima_queue_entry *qe;
	unsigned int key;

	qe = kmalloc(sizeof(*qe), GFP_KERNEL);
	if (qe == NULL) {
		pr_err("OUT OF MEMORY ERROR creating queue entry\n");
		return -ENOMEM;
	}
	qe->entry = entry;
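	/* append to the global measurement list; entries are never removed */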
	INIT_LIST_HEAD(&qe->later);
	list_add_tail_rcu(&qe->later, &ima_measurements);

	atomic_long_inc(&ima_htable.len);
	if (update_htable) {
		key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
		hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
	}

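	/* track serialized size for kexec, saturating at ULONG_MAX on overflow */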
	if (binary_runtime_size != ULONG_MAX) {
		int size;

		size = get_binary_runtime_size(entry);
		binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
		     binary_runtime_size + size : ULONG_MAX;
	}
	return 0;
}

/*
 * Return the amount of memory required for serializing the
 * entire binary_runtime_measurement list, including the ima_kexec_hdr
 * structure.
 */
unsigned long ima_get_binary_runtime_size(void)
{
	if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
		return ULONG_MAX;
	else
		return binary_runtime_size + sizeof(struct ima_kexec_hdr);
}

static int ima_pcr_extend(struct tpm_digest *digests_arg, int pcr)
{
	int result = 0;

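	/* without a TPM, IMA runs in TPM-bypass mode: keep the list, skip the extend */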
	if (!ima_tpm_chip)
		return result;

	result = tpm_pcr_extend(ima_tpm_chip, pcr, digests_arg);
	if (result != 0)
		pr_err("Error Communicating to TPM chip, result: %d\n", result);
	return result;
}

/*
 * Add template entry to the measurement list and hash table, and
 * extend the pcr.
 *
 * On systems which support carrying the IMA measurement list across
 * kexec, maintain the total memory size required for serializing the
 * binary_runtime_measurements.
 */
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
			   const char *op, struct inode *inode,
			   const unsigned char *filename)
{
	u8 *digest = entry->digests[ima_hash_algo_idx].digest;
	struct tpm_digest *digests_arg = entry->digests;
	const char *audit_cause = "hash_added";
	char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
	int audit_info = 1;
	int result = 0, tpmresult = 0;

	mutex_lock(&ima_extend_list_mutex);
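	/* skip duplicates: a digest already measured into this PCR is not
	 * re-added, unless it is a violation or the htable is disabled
	 */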
	if (!violation && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) {
		if (ima_lookup_digest_entry(digest, entry->pcr)) {
			audit_cause = "hash_exists";
			result = -EEXIST;
			goto out;
		}
	}

	result = ima_add_digest_entry(entry,
				      !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE));
	if (result < 0) {
		audit_cause = "ENOMEM";
		audit_info = 0;
		goto out;
	}

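	/*
	 * On a violation, extend the PCR with the pre-allocated 0xff-filled
	 * digests (see ima_init_digests) so the PCR aggregate is invalidated.
	 */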
	if (violation)		/* invalidate pcr */
		digests_arg = digests;

	tpmresult = ima_pcr_extend(digests_arg, entry->pcr);
	if (tpmresult != 0) {
		snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
			 tpmresult);
		audit_cause = tpm_audit_cause;
		audit_info = 0;
	}
out:
	mutex_unlock(&ima_extend_list_mutex);
	integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
			    op, audit_cause, result, audit_info);
	return result;
}

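/*
 * Restore a measurement carried across kexec: add it to the measurement
 * list but not the hash table, and do not re-extend the PCR, which
 * already reflects these measurements from the previous kernel.
 */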
int ima_restore_measurement_entry(struct ima_template_entry *entry)
{
	int result = 0;

	mutex_lock(&ima_extend_list_mutex);
	result = ima_add_digest_entry(entry, 0);
	mutex_unlock(&ima_extend_list_mutex);
	return result;
}

int __init ima_init_digests(void)
{
	u16 digest_size;
	u16 crypto_id;
	int i;

	if (!ima_tpm_chip)
		return 0;

	digests = kcalloc(ima_tpm_chip->nr_allocated_banks, sizeof(*digests),
			  GFP_NOFS);
	if (!digests)
		return -ENOMEM;

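	/* prepare one 0xff-filled digest per allocated PCR bank, used to
	 * invalidate the PCR on measurement violations
	 */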
	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
		digest_size = ima_tpm_chip->allocated_banks[i].digest_size;
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;

		/* for unmapped TPM algorithms digest is still a padded SHA1 */
		if (crypto_id == HASH_ALGO__LAST)
			digest_size = SHA1_DIGEST_SIZE;

		memset(digests[i].digest, 0xff, digest_size);
	}

	return 0;
}