/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Serge Hallyn <serue@us.ibm.com>
 * Reiner Sailer <sailer@watson.ibm.com>
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * File: ima_queue.c
 *       Implements queues that store template measurements and
 *       maintains an aggregate over the stored measurements
 *       in the pre-configured TPM PCR (if available).
 *       The measurement list is append-only. No entry is
 *       ever removed or changed during the boot-cycle.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/rculist.h>
#include <linux/slab.h>
#include "ima.h"
#define AUDIT_CAUSE_LEN_MAX 32

/* pre-allocated array of tpm_digest structures to extend a PCR */
static struct tpm_digest *digests;

LIST_HEAD(ima_measurements);	/* list of all measurements */
#ifdef CONFIG_IMA_KEXEC
static unsigned long binary_runtime_size;
#else
static unsigned long binary_runtime_size = ULONG_MAX;
#endif
/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
	.len = ATOMIC_LONG_INIT(0),
	.violations = ATOMIC_LONG_INIT(0),
	.queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
};
/* mutex protects atomicity of extending the measurement list
 * and extending the TPM PCR aggregate. Since tpm_extend can take a
 * long time (and the tpm driver uses a mutex), we can't use the spinlock.
 */
static DEFINE_MUTEX(ima_extend_list_mutex);
/* look up the digest value in the hash table, and return the entry */
static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
							int pcr)
{
	struct ima_queue_entry *qe, *ret = NULL;
	unsigned int key;
	int rc;

	key = ima_hash_key(digest_value);
	rcu_read_lock();
	hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
		rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE);
		if ((rc == 0) && (qe->entry->pcr == pcr)) {
			ret = qe;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
/*
 * Calculate the memory required for serializing a single
 * binary_runtime_measurement list entry, which contains a
 * couple of variable length fields (e.g. template name and data).
 */
static int get_binary_runtime_size(struct ima_template_entry *entry)
{
	int size = 0;

	size += sizeof(u32);	/* pcr */
	size += sizeof(entry->digest);
	size += sizeof(int);	/* template name size field */
	size += strlen(entry->template_desc->name);
	size += sizeof(entry->template_data_len);
	size += entry->template_data_len;
	return size;
}
/* ima_add_template_entry helper function:
 * - Add template entry to the measurement list and hash table, for
 *   all entries except those carried across kexec.
 *
 * (Called with ima_extend_list_mutex held.)
 */
static int ima_add_digest_entry(struct ima_template_entry *entry,
				bool update_htable)
{
	struct ima_queue_entry *qe;
	unsigned int key;

	qe = kmalloc(sizeof(*qe), GFP_KERNEL);
	if (qe == NULL) {
		pr_err("OUT OF MEMORY ERROR creating queue entry\n");
		return -ENOMEM;
	}
	qe->entry = entry;

	INIT_LIST_HEAD(&qe->later);
	list_add_tail_rcu(&qe->later, &ima_measurements);

	atomic_long_inc(&ima_htable.len);
	if (update_htable) {
		key = ima_hash_key(entry->digest);
		hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
	}

	if (binary_runtime_size != ULONG_MAX) {
		int size;

		size = get_binary_runtime_size(entry);
		binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
		     binary_runtime_size + size : ULONG_MAX;
	}
	return 0;
}
/*
 * Return the amount of memory required for serializing the
 * entire binary_runtime_measurement list, including the ima_kexec_hdr
 * structure.
 */
unsigned long ima_get_binary_runtime_size(void)
{
	if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
		return ULONG_MAX;
	else
		return binary_runtime_size + sizeof(struct ima_kexec_hdr);
}
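
/*
 * Extend the specified TPM PCR with the template hash: the same
 * digest value is copied into the pre-allocated tpm_digest for each
 * allocated PCR bank before issuing a single tpm_pcr_extend() call.
 * Returns 0 when no TPM chip is available.
 */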
static int ima_pcr_extend(const u8 *hash, int pcr)
{
	int result = 0;
	int i;

	if (!ima_tpm_chip)
		return result;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++)
		memcpy(digests[i].digest, hash, TPM_DIGEST_SIZE);

	result = tpm_pcr_extend(ima_tpm_chip, pcr, digests);
	if (result != 0)
		pr_err("Error Communicating to TPM chip, result: %d\n", result);
	return result;
}
/*
 * Add template entry to the measurement list and hash table, and
 * extend the pcr.
 *
 * On systems which support carrying the IMA measurement list across
 * kexec, maintain the total memory size required for serializing the
 * binary_runtime_measurements.
 */
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
			   const char *op, struct inode *inode,
			   const unsigned char *filename)
{
	u8 digest[TPM_DIGEST_SIZE];
	const char *audit_cause = "hash_added";
	char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
	int audit_info = 1;
	int result = 0, tpmresult = 0;

	mutex_lock(&ima_extend_list_mutex);
	if (!violation) {
		memcpy(digest, entry->digest, sizeof(digest));
		if (ima_lookup_digest_entry(digest, entry->pcr)) {
			audit_cause = "hash_exists";
			result = -EEXIST;
			goto out;
		}
	}

	result = ima_add_digest_entry(entry, 1);
	if (result < 0) {
		audit_cause = "ENOMEM";
		audit_info = 0;
		goto out;
	}

	if (violation)		/* invalidate pcr */
		memset(digest, 0xff, sizeof(digest));

	tpmresult = ima_pcr_extend(digest, entry->pcr);
	if (tpmresult != 0) {
		snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
			 tpmresult);
		audit_cause = tpm_audit_cause;
		audit_info = 0;
	}
out:
	mutex_unlock(&ima_extend_list_mutex);
	integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
			    op, audit_cause, result, audit_info);
	return result;
}
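
/*
 * Restore a measurement entry carried across kexec: the entry is
 * appended to the measurement list only, without updating the hash
 * table or extending a PCR.
 */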
int ima_restore_measurement_entry(struct ima_template_entry *entry)
{
	int result = 0;

	mutex_lock(&ima_extend_list_mutex);
	result = ima_add_digest_entry(entry, 0);
	mutex_unlock(&ima_extend_list_mutex);
	return result;
}
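
/*
 * Pre-allocate one tpm_digest per allocated TPM PCR bank and record
 * each bank's algorithm id, so ima_pcr_extend() can reuse the array
 * without allocating at measurement time.
 */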
int __init ima_init_digests(void)
{
	int i;

	if (!ima_tpm_chip)
		return 0;

	digests = kcalloc(ima_tpm_chip->nr_allocated_banks, sizeof(*digests),
			  GFP_NOFS);
	if (!digests)
		return -ENOMEM;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++)
		digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id;

	return 0;
}