2 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
5 * Serge Hallyn <serue@us.ibm.com>
6 * Reiner Sailer <sailer@watson.ibm.com>
7 * Mimi Zohar <zohar@us.ibm.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation, version 2 of the
15 * Implements queues that store template measurements and
16 * maintains aggregate over the stored measurements
17 * in the pre-configured TPM PCR (if available).
18 * The measurement list is append-only. No entry is
19 * ever removed or changed during the boot-cycle.
22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24 #include <linux/rculist.h>
25 #include <linux/slab.h>
28 #define AUDIT_CAUSE_LEN_MAX 32
30 /* pre-allocated array of tpm_digest structures to extend a PCR */
/*
 * One tpm_digest per allocated TPM bank (see ima_init_digests(), which
 * kcalloc's this array and sets each element's alg_id); reused for every
 * PCR extend in ima_pcr_extend().
 */
31 static struct tpm_digest
*digests
;
/*
 * Append-only list of every measurement entry; entries are added at the
 * tail under ima_extend_list_mutex (see ima_add_digest_entry()) and,
 * per the file header, never removed during the boot-cycle.
 */
33 LIST_HEAD(ima_measurements
); /* list of all measurements */
34 #ifdef CONFIG_IMA_KEXEC
/*
 * Running byte total needed to serialize the measurement list across
 * kexec; grown (saturating at ULONG_MAX) in ima_add_digest_entry().
 *
 * NOTE(review): the #else/#endif lines for this #ifdef are not visible
 * in this extraction -- the second definition below (initialized to
 * ULONG_MAX, meaning "not supported") is presumably the
 * !CONFIG_IMA_KEXEC branch; confirm against the original file.
 */
35 static unsigned long binary_runtime_size
;
37 static unsigned long binary_runtime_size
= ULONG_MAX
;
40 /* key: inode (before secure-hashing a file) */
/*
 * Global hash table of measurement entries keyed by digest
 * (ima_hash_key()): .len counts stored entries, .violations counts
 * measurement violations; .queue buckets are RCU hlists.
 *
 * NOTE(review): the closing brace(s) of this designated initializer are
 * not visible here -- the extraction appears to have dropped lines.
 */
41 struct ima_h_table ima_htable
= {
42 .len
= ATOMIC_LONG_INIT(0),
43 .violations
= ATOMIC_LONG_INIT(0),
44 .queue
[0 ... IMA_MEASURE_HTABLE_SIZE
- 1] = HLIST_HEAD_INIT
47 /* mutex protects atomicity of extending measurement list
48 * and extending the TPM PCR aggregate. Since tpm_extend can take
49 * long (and the tpm driver uses a mutex), we can't use the spinlock.
 *
 * Held across ima_add_template_entry() and ima_restore_measurement_entry();
 * ima_add_digest_entry() documents that it must be called with it held.
 */
51 static DEFINE_MUTEX(ima_extend_list_mutex
);
53 /* lookup up the digest value in the hash table, and return the entry */
/*
 * Walks the RCU hlist bucket selected by ima_hash_key(digest_value) and
 * matches on both the full digest (memcmp over TPM_DIGEST_SIZE) and the
 * entry's pcr, so identical digests extended into different PCRs are
 * distinct entries.
 *
 * NOTE(review): this extraction is missing lines of the original -- the
 * trailing `int pcr` parameter, the rc/key local declarations, the RCU
 * read-side lock/unlock, the match-found body, and the function's
 * return/closing brace are not visible here.
 */
54 static struct ima_queue_entry
*ima_lookup_digest_entry(u8
*digest_value
,
57 struct ima_queue_entry
*qe
, *ret
= NULL
;
61 key
= ima_hash_key(digest_value
);
63 hlist_for_each_entry_rcu(qe
, &ima_htable
.queue
[key
], hnext
) {
64 rc
= memcmp(qe
->entry
->digest
, digest_value
, TPM_DIGEST_SIZE
);
65 if ((rc
== 0) && (qe
->entry
->pcr
== pcr
)) {
75 * Calculate the memory required for serializing a single
76 * binary_runtime_measurement list entry, which contains a
77 * couple of variable length fields (e.g template name and data).
/*
 * Sums: pcr (u32) + digest + template-name length field (int) + the
 * name bytes themselves + the template_data_len field + the template
 * data bytes. Used by ima_add_digest_entry() to grow
 * binary_runtime_size.
 *
 * NOTE(review): the opening brace, the declaration/initialization of
 * `size`, and the `return size;` / closing brace are not visible in
 * this extraction.
 */
79 static int get_binary_runtime_size(struct ima_template_entry
*entry
)
83 size
+= sizeof(u32
); /* pcr */
84 size
+= sizeof(entry
->digest
);
85 size
+= sizeof(int); /* template name size field */
86 size
+= strlen(entry
->template_desc
->name
);
87 size
+= sizeof(entry
->template_data_len
);
88 size
+= entry
->template_data_len
;
92 /* ima_add_template_entry helper function:
93 * - Add template entry to the measurement list and hash table, for
94 * all entries except those carried across kexec.
96 * (Called with ima_extend_list_mutex held.)
/*
 * Allocates a queue entry wrapping @entry, appends it to the tail of
 * ima_measurements (RCU), bumps ima_htable.len, and hooks it into the
 * digest-keyed hash bucket. When CONFIG_IMA_KEXEC accounting is active
 * (binary_runtime_size != ULONG_MAX), grows binary_runtime_size by
 * get_binary_runtime_size(entry), saturating at ULONG_MAX on overflow.
 *
 * NOTE(review): missing from this extraction -- the second parameter's
 * declaration (callers pass 1 from ima_add_template_entry and 0 from
 * ima_restore_measurement_entry; presumably it gates the kexec size
 * accounting -- confirm), the `key`/`size` local declarations, the
 * allocation-failure return after the pr_err, `qe->entry = entry;`,
 * and the function's return/closing brace.
 */
98 static int ima_add_digest_entry(struct ima_template_entry
*entry
,
101 struct ima_queue_entry
*qe
;
104 qe
= kmalloc(sizeof(*qe
), GFP_KERNEL
);
106 pr_err("OUT OF MEMORY ERROR creating queue entry\n");
111 INIT_LIST_HEAD(&qe
->later
);
112 list_add_tail_rcu(&qe
->later
, &ima_measurements
);
114 atomic_long_inc(&ima_htable
.len
);
116 key
= ima_hash_key(entry
->digest
);
117 hlist_add_head_rcu(&qe
->hnext
, &ima_htable
.queue
[key
]);
120 if (binary_runtime_size
!= ULONG_MAX
) {
123 size
= get_binary_runtime_size(entry
);
124 binary_runtime_size
= (binary_runtime_size
< ULONG_MAX
- size
) ?
125 binary_runtime_size
+ size
: ULONG_MAX
;
131 * Return the amount of memory required for serializing the
132 * entire binary_runtime_measurement list, including the ima_kexec_hdr
/*
 * Adds the serialization header size on top of the accumulated
 * binary_runtime_size, guarding against overflow first.
 *
 * NOTE(review): the opening brace, the overflow-branch return value
 * (presumably ULONG_MAX -- confirm), the `else`, and the closing brace
 * are not visible in this extraction.
 */
135 unsigned long ima_get_binary_runtime_size(void)
137 if (binary_runtime_size
>= (ULONG_MAX
- sizeof(struct ima_kexec_hdr
)))
140 return binary_runtime_size
+ sizeof(struct ima_kexec_hdr
);
/*
 * Extend TPM PCR @pcr with @hash: the same TPM_DIGEST_SIZE hash bytes
 * are copied into every allocated bank's pre-allocated digest slot
 * (the `digests` array set up by ima_init_digests()) and handed to
 * tpm_pcr_extend() in one call; a non-zero result is logged.
 *
 * NOTE(review): missing from this extraction -- the opening brace, the
 * `result`/`i` local declarations, any early return when no TPM chip is
 * present, and the `return result;` / closing brace.
 */
143 static int ima_pcr_extend(const u8
*hash
, int pcr
)
151 for (i
= 0; i
< ima_tpm_chip
->nr_allocated_banks
; i
++)
152 memcpy(digests
[i
].digest
, hash
, TPM_DIGEST_SIZE
);
154 result
= tpm_pcr_extend(ima_tpm_chip
, pcr
, digests
);
156 pr_err("Error Communicating to TPM chip, result: %d\n", result
);
161 * Add template entry to the measurement list and hash table, and
164 * On systems which support carrying the IMA measurement list across
165 * kexec, maintain the total memory size required for serializing the
166 * binary_runtime_measurements.
/*
 * Main measurement path, serialized by ima_extend_list_mutex:
 *  1. duplicate check -- if the (digest, pcr) pair is already in the
 *     hash table, audit_cause becomes "hash_exists" (original code
 *     presumably also bumps the violations counter and skips the add --
 *     those lines are not visible here);
 *  2. ima_add_digest_entry() appends the entry (failure -> "ENOMEM");
 *  3. a violation invalidates the PCR by extending with an all-0xff
 *     digest instead of the real one;
 *  4. ima_pcr_extend() extends entry->pcr; on TPM error the formatted
 *     "TPM_error(%d)" string becomes the audit cause;
 *  5. the mutex is dropped and an AUDIT_INTEGRITY_PCR record is emitted
 *     with op/cause/result.
 *
 * NOTE(review): this extraction has dropped lines throughout -- the
 * opening brace, the duplicate/ENOMEM early-exit gotos and `out` label,
 * the snprintf's tpmresult argument, the result propagation on TPM
 * failure, and the `return result;` / closing brace are not visible.
 */
168 int ima_add_template_entry(struct ima_template_entry
*entry
, int violation
,
169 const char *op
, struct inode
*inode
,
170 const unsigned char *filename
)
172 u8 digest
[TPM_DIGEST_SIZE
];
173 const char *audit_cause
= "hash_added";
174 char tpm_audit_cause
[AUDIT_CAUSE_LEN_MAX
];
176 int result
= 0, tpmresult
= 0;
178 mutex_lock(&ima_extend_list_mutex
);
180 memcpy(digest
, entry
->digest
, sizeof(digest
));
181 if (ima_lookup_digest_entry(digest
, entry
->pcr
)) {
182 audit_cause
= "hash_exists";
188 result
= ima_add_digest_entry(entry
, 1);
190 audit_cause
= "ENOMEM";
195 if (violation
) /* invalidate pcr */
196 memset(digest
, 0xff, sizeof(digest
));
198 tpmresult
= ima_pcr_extend(digest
, entry
->pcr
);
199 if (tpmresult
!= 0) {
200 snprintf(tpm_audit_cause
, AUDIT_CAUSE_LEN_MAX
, "TPM_error(%d)",
202 audit_cause
= tpm_audit_cause
;
206 mutex_unlock(&ima_extend_list_mutex
);
207 integrity_audit_msg(AUDIT_INTEGRITY_PCR
, inode
, filename
,
208 op
, audit_cause
, result
, audit_info
);
/*
 * Re-add a measurement entry carried across kexec: takes the same list
 * mutex but calls ima_add_digest_entry() with 0 as its second argument
 * (vs. 1 on the normal path) and performs no TPM extend -- the PCR was
 * already extended before the kexec.
 *
 * NOTE(review): the opening brace, the `result` declaration, and the
 * `return result;` / closing brace are not visible in this extraction.
 */
212 int ima_restore_measurement_entry(struct ima_template_entry
*entry
)
216 mutex_lock(&ima_extend_list_mutex
);
217 result
= ima_add_digest_entry(entry
, 0);
218 mutex_unlock(&ima_extend_list_mutex
);
222 int __init
ima_init_digests(void)
229 digests
= kcalloc(ima_tpm_chip
->nr_allocated_banks
, sizeof(*digests
),
234 for (i
= 0; i
< ima_tpm_chip
->nr_allocated_banks
; i
++)
235 digests
[i
].alg_id
= ima_tpm_chip
->allocated_banks
[i
].alg_id
;