// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Serge Hallyn <serue@us.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * File: ima_queue.c
 *       Implements queues that store template measurements and
 *       maintains aggregate over the stored measurements
 *       in the pre-configured TPM PCR (if available).
 *       The measurement list is append-only. No entry is
 *       ever removed or changed during the boot-cycle.
 */
#include <linux/rculist.h>
#include <linux/slab.h>
#include "ima.h"
#define AUDIT_CAUSE_LEN_MAX 32
/* pre-allocated array of tpm_digest structures to extend a PCR */
static struct tpm_digest *digests;
LIST_HEAD(ima_measurements);	/* list of all measurements */
#ifdef CONFIG_IMA_KEXEC
static unsigned long binary_runtime_size;
#else
static unsigned long binary_runtime_size = ULONG_MAX;
#endif
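/*
 * With CONFIG_IMA_KEXEC the serialized list size starts at zero and grows
 * as entries are added; otherwise it stays pegged at ULONG_MAX, which
 * makes ima_get_binary_runtime_size() below report ULONG_MAX as well.
 */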
/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
	.len = ATOMIC_LONG_INIT(0),
	.violations = ATOMIC_LONG_INIT(0),
	.queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
};
/* mutex protects atomicity of extending measurement list
 * and extending the TPM PCR aggregate. Since tpm_extend can take
 * long (and the tpm driver uses a mutex), we can't use the spinlock.
 */
static DEFINE_MUTEX(ima_extend_list_mutex);
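/*
 * Note: the mutex serializes writers only.  Readers (e.g. the securityfs
 * measurement list files) walk ima_measurements and the hash table
 * locklessly under RCU, which is why additions use the _rcu list helpers.
 */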
/* look up the digest value in the hash table, and return the entry */
static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
						       int pcr)
{
	struct ima_queue_entry *qe, *ret = NULL;
	unsigned int key;
	int rc;

	key = ima_hash_key(digest_value);
	rcu_read_lock();
	hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
		rc = memcmp(qe->entry->digests[ima_hash_algo_idx].digest,
			    digest_value, hash_digest_size[ima_hash_algo]);
		if ((rc == 0) && (qe->entry->pcr == pcr)) {
			ret = qe;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
/*
 * Calculate the memory required for serializing a single
 * binary_runtime_measurement list entry, which contains a
 * couple of variable length fields (e.g. template name and data).
 */
static int get_binary_runtime_size(struct ima_template_entry *entry)
{
	int size = 0;

	size += sizeof(u32);	/* pcr */
	size += TPM_DIGEST_SIZE;
	size += sizeof(int);	/* template name size field */
	size += strlen(entry->template_desc->name);
	size += sizeof(entry->template_data_len);
	size += entry->template_data_len;
	return size;
}
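/*
 * Note: the fields summed above are assumed to mirror the record layout
 * of the binary_runtime_measurements list as exported through securityfs
 * (pcr, digest, name length, name, data length, data); a mismatch would
 * mis-size the buffer carried across kexec.
 */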
/* ima_add_template_entry helper function:
 * - Add template entry to the measurement list and hash table, for
 *   all entries except those carried across kexec.
 *
 * (Called with ima_extend_list_mutex held.)
 */
static int ima_add_digest_entry(struct ima_template_entry *entry,
				bool update_htable)
{
	struct ima_queue_entry *qe;
	unsigned int key;

	qe = kmalloc(sizeof(*qe), GFP_KERNEL);
	if (qe == NULL) {
		pr_err("OUT OF MEMORY ERROR creating queue entry\n");
		return -ENOMEM;
	}
	qe->entry = entry;

	INIT_LIST_HEAD(&qe->later);
	list_add_tail_rcu(&qe->later, &ima_measurements);

	atomic_long_inc(&ima_htable.len);
	if (update_htable) {
		key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
		hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
	} else
		INIT_HLIST_NODE(&qe->hnext);

	if (binary_runtime_size != ULONG_MAX) {
		int size;

		size = get_binary_runtime_size(entry);
		/* saturate at ULONG_MAX instead of wrapping around */
		binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
		     binary_runtime_size + size : ULONG_MAX;
	}
	return 0;
}
/*
 * Return the amount of memory required for serializing the
 * entire binary_runtime_measurement list, including the ima_kexec_hdr
 * structure.
 */
unsigned long ima_get_binary_runtime_size(void)
{
	if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
		return ULONG_MAX;
	else
		return binary_runtime_size + sizeof(struct ima_kexec_hdr);
}
static int ima_pcr_extend(struct tpm_digest *digests_arg, int pcr)
{
	int result = 0;

	if (!ima_tpm_chip)
		return result;

	result = tpm_pcr_extend(ima_tpm_chip, pcr, digests_arg);
	if (result != 0)
		pr_err("Error Communicating to TPM chip, result: %d\n", result);
	return result;
}
/*
 * Add template entry to the measurement list and hash table, and
 * extend the pcr.
 *
 * On systems which support carrying the IMA measurement list across
 * kexec, maintain the total memory size required for serializing the
 * binary_runtime_measurements.
 */
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
			   const char *op, struct inode *inode,
			   const unsigned char *filename)
{
	u8 *digest = entry->digests[ima_hash_algo_idx].digest;
	struct tpm_digest *digests_arg = entry->digests;
	const char *audit_cause = "hash_added";
	char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
	int audit_info = 1;
	int result = 0, tpmresult = 0;

	mutex_lock(&ima_extend_list_mutex);
	if (!violation && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) {
		if (ima_lookup_digest_entry(digest, entry->pcr)) {
			audit_cause = "hash_exists";
			result = -EEXIST;
			goto out;
		}
	}

	result = ima_add_digest_entry(entry,
				      !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE));
	if (result < 0) {
		audit_cause = "ENOMEM";
		audit_info = 0;
		goto out;
	}

	if (violation)		/* invalidate pcr */
		digests_arg = digests;

	tpmresult = ima_pcr_extend(digests_arg, entry->pcr);
	if (tpmresult != 0) {
		snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
			 tpmresult);
		audit_cause = tpm_audit_cause;
		audit_info = 0;
	}
out:
	mutex_unlock(&ima_extend_list_mutex);
	integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
			    op, audit_cause, result, audit_info);
	return result;
}
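/*
 * Restore a measurement entry carried across kexec.  Such entries were
 * extended into the TPM before the kexec, so only the in-memory list is
 * updated here; the hash table is deliberately skipped.
 */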
int ima_restore_measurement_entry(struct ima_template_entry *entry)
{
	int result = 0;

	mutex_lock(&ima_extend_list_mutex);
	result = ima_add_digest_entry(entry, 0);
	mutex_unlock(&ima_extend_list_mutex);
	return result;
}
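/*
 * Allocate one tpm_digest per allocated TPM bank and fill it with 0xff
 * bytes.  These are the digests ima_add_template_entry() extends on a
 * violation, deliberately invalidating the PCR aggregate.
 */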
int __init ima_init_digests(void)
{
	u16 digest_size;
	u16 crypto_id;
	int i;

	if (!ima_tpm_chip)
		return 0;

	digests = kcalloc(ima_tpm_chip->nr_allocated_banks, sizeof(*digests),
			  GFP_NOFS);
	if (!digests)
		return -ENOMEM;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
		digest_size = ima_tpm_chip->allocated_banks[i].digest_size;
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;

		/* for unmapped TPM algorithms digest is still a padded SHA1 */
		if (crypto_id == HASH_ALGO__LAST)
			digest_size = SHA1_DIGEST_SIZE;

		memset(digests[i].digest, 0xff, digest_size);
	}

	return 0;
}