/*
 * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int nx_sha256_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_sg *out_sg;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);

	memset(sctx, 0, sizeof *sctx);

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  SHA256_DIGEST_SIZE, nx_ctx->ap->sglen);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	return 0;
}
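
/*
 * Update: buffer sub-block data in the sha256_state; once at least one full
 * SHA256_BLOCK_SIZE chunk is available, hand it to the coprocessor in pieces
 * bounded by the device's scatter/gather and byte-count limits, carrying the
 * running digest between h/w calls via the CSB/CPB continuation fields.
 * Note the sg list lengths stored in nx_ctx->op come out negated; per the
 * convention in nx.c, that tells phyp the parameters are scatter/gather
 * lists rather than linear buffers.
 */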
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u64 to_process, leftover, total;
	u32 max_sg_len;
	unsigned long irq_flags;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* 2 cases for total data len:
	 *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	total = sctx->count + len;
	if (total < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
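
	/* each pass below hands the h/w the buffered partial block (if any)
	 * followed by as much new data as the limits permit, rounded down to
	 * a multiple of SHA256_BLOCK_SIZE */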
	do {
		/*
		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
		 * this update. This value is also restricted by the sg list
		 * limits.
		 */
		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
		leftover = total - to_process;

		if (sctx->count) {
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buf,
						 sctx->count, max_sg_len);
		}
		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
					 to_process - sctx->count,
					 max_sg_len);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);

		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			/*
			 * we've hit the nx chip previously and we're updating
			 * again, so copy over the partial digest.
			 */
			memcpy(csbcpb->cpb.sha256.input_partial_digest,
			       csbcpb->cpb.sha256.message_digest,
			       SHA256_DIGEST_SIZE);
		}

		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->sha256_ops));
		csbcpb->cpb.sha256.message_bit_length += (u64)
			(csbcpb->cpb.sha256.spbc * 8);

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		total -= to_process;
		data += to_process - sctx->count;
		sctx->count = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover >= SHA256_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data, leftover);
	sctx->count = leftover;
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
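
/*
 * Final: hash whatever is still buffered and read out the digest.  With
 * NX_FDM_INTERMEDIATE cleared, the h/w itself applies the SHA-256 padding,
 * using the bit count accumulated in the CPB.
 */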
static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u32 max_sg_len;
	unsigned long irq_flags;
	int rc;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha256.input_partial_digest,
		       csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
	}

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);

	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
				 sctx->count, max_sg_len);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
				  max_sg_len);
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));

	atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
		     &(nx_ctx->stats->sha256_bytes));
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
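
/*
 * Export the running state in the generic struct sha256_state layout, so a
 * software SHA-256 implementation can resume where the h/w left off.
 */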
static int nx_sha256_export(struct shash_desc *desc, void *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct sha256_state *octx = out;
	unsigned long irq_flags;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	octx->count = sctx->count +
		      (csbcpb->cpb.sha256.message_bit_length / 8);
	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));

	/* if no data has been processed yet, we need to export SHA256's
	 * initial data, in case this context gets imported into a software
	 * context */
	if (csbcpb->cpb.sha256.message_bit_length)
		memcpy(octx->state, csbcpb->cpb.sha256.message_digest,
		       SHA256_DIGEST_SIZE);
	else {
		octx->state[0] = SHA256_H0;
		octx->state[1] = SHA256_H1;
		octx->state[2] = SHA256_H2;
		octx->state[3] = SHA256_H3;
		octx->state[4] = SHA256_H4;
		octx->state[5] = SHA256_H5;
		octx->state[6] = SHA256_H6;
		octx->state[7] = SHA256_H7;
	}

	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return 0;
}
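
/*
 * Import generic struct sha256_state contents: the block-aligned portion of
 * ->count becomes the h/w bit count (0x3f == SHA256_BLOCK_SIZE - 1) and the
 * trailing partial block is buffered for the next update.
 */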
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	const struct sha256_state *ictx = in;
	unsigned long irq_flags;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));

	sctx->count = ictx->count & 0x3f;
	csbcpb->cpb.sha256.message_bit_length = (ictx->count & ~0x3f) * 8;

	if (csbcpb->cpb.sha256.message_bit_length) {
		memcpy(csbcpb->cpb.sha256.message_digest, ictx->state,
		       SHA256_DIGEST_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	}

	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return 0;
}
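
/*
 * This alg is registered with the crypto API by the common driver code in
 * nx.c when the device tree shows the accelerator supports SHA-256.
 */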
struct shash_alg nx_shash_sha256_alg = {
	.digestsize = SHA256_DIGEST_SIZE,
	.init       = nx_sha256_init,
	.update     = nx_sha256_update,
	.final      = nx_sha256_final,
	.export     = nx_sha256_export,
	.import     = nx_sha256_import,
	.descsize   = sizeof(struct sha256_state),
	.statesize  = sizeof(struct sha256_state),
	.base       = {
		.cra_name        = "sha256",
		.cra_driver_name = "sha256-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = SHA256_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_sha_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
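
/*
 * Illustrative use through the generic shash API (not part of this driver;
 * error handling omitted):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *	struct shash_desc *desc = kmalloc(sizeof(*desc) +
 *					  crypto_shash_descsize(tfm),
 *					  GFP_KERNEL);
 *
 *	desc->tfm = tfm;
 *	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	crypto_shash_digest(desc, data, len, digest);
 *
 * "sha256-nx" is selected automatically whenever it is the highest-priority
 * sha256 provider on the system.
 */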