/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"

static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req);
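
/* Complete the current AEAD request and release the engine. */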
static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
	struct aead_request *req = dd->aead_req;

	dd->flags &= ~FLAGS_BUSY;
	dd->in_sg = NULL;
	dd->out_sg = NULL;

	req->base.complete(&req->base, ret);
}
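
/*
 * Post-DMA completion: sync and unmap the DMA scatterlists, release any
 * bounce buffers, copy the computed tag out after encryption, and check
 * the tag residue after decryption (the DMA callback has already XORed
 * the received tag in, so any nonzero byte means authentication failed).
 */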
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}
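
/*
 * Prepare the scatterlists for DMA: assoc and payload lengths are rounded
 * up to AES_BLOCK_SIZE, and data is bounced into zero-padded, block-aligned
 * single-entry buffers where the hardware requires it.
 */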
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	int alen, clen, cryptlen, assoclen, ret;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct scatterlist *tmp, sg_arr[2];
	int nsg;
	u16 flags;

	assoclen = req->assoclen;
	cryptlen = req->cryptlen;

	if (dd->flags & FLAGS_RFC4106_GCM)
		assoclen -= 8;

	if (!(dd->flags & FLAGS_ENCRYPT))
		cryptlen -= authlen;

	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

	nsg = !!(assoclen && cryptlen);

	omap_aes_clear_copy_flags(dd);

	sg_init_table(dd->in_sgl, nsg + 1);
	if (assoclen) {
		tmp = req->src;
		ret = omap_crypto_align_sg(&tmp, assoclen,
					   AES_BLOCK_SIZE, dd->in_sgl,
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_ASSOC_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	if (cryptlen) {
		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

		ret = omap_crypto_align_sg(&tmp, cryptlen,
					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_IN_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg = dd->in_sgl;
	dd->total = cryptlen;
	dd->assoc_len = assoclen;
	dd->authsize = authlen;

	dd->out_sg = req->dst;
	dd->orig_out = req->dst;

	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);

	flags = 0;
	if (req->src == req->dst || dd->out_sg == sg_arr)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
				   AES_BLOCK_SIZE, &dd->out_sgl,
				   flags,
				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

	return 0;
}
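
/* Completion callback for the CTR encryption of the IV in do_encrypt_iv(). */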
static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
{
	struct omap_aes_gcm_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}
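
/*
 * Encrypt the initial counter block J0 with the fallback CTR skcipher;
 * the result, E(K, J0), seeds the tag computation (GCM XORs it into the
 * final GHASH value).
 */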
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct scatterlist iv_sg, tag_sg;
	struct skcipher_request *sk_req;
	struct omap_aes_gcm_result result;
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int ret = 0;

	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
	if (!sk_req) {
		pr_err("skcipher: Failed to allocate request\n");
		return -ENOMEM;
	}

	init_completion(&result.completion);

	sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
	sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
	skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      omap_aes_gcm_complete, &result);
	ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
	if (ret)
		goto out;

	skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
				   NULL);
	ret = crypto_skcipher_encrypt(sk_req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret) {
			ret = result.err;
			if (!ret) {
				reinit_completion(&result.completion);
				break;
			}
		}
		/* fall through */
	default:
		pr_err("Encryption of IV failed for GCM mode\n");
		break;
	}

out:
	skcipher_request_free(sk_req);
	return ret;
}
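
/*
 * DMA completion callback for the output channel: fold the hardware tag
 * registers into the precomputed E(K, J0) value, and on decryption also
 * XOR in the tag received with the ciphertext so that a valid message
 * leaves an all-zero residue for omap_aes_gcm_done_task() to check.
 */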
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}
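
/*
 * Enqueue a request and, if the engine is idle, start the next queued
 * request. Called with req == NULL from the completion path to drain
 * the queue.
 */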
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	struct omap_aes_ctx *ctx;
	struct aead_request *backlog;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = aead_enqueue_request(&dd->aead_queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = aead_get_backlog(&dd->aead_queue);
	req = aead_dequeue_request(&dd->aead_queue);
	if (req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!req)
		return ret;

	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	rctx = aead_request_ctx(req);

	dd->ctx = ctx;
	rctx->dd = dd;
	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);
	if (err)
		return err;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);

	if (err) {
		omap_aes_gcm_finish_req(dd, err);
		omap_aes_gcm_handle_queue(dd, NULL);
	}

	return ret;
}
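
/*
 * Common entry point: precompute E(K, J0), handle the zero-length case
 * entirely in software, then hand the request to the device queue.
 */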
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}
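
/* AEAD entry points for gcm(aes) and rfc4106(gcm(aes)). */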
int omap_aes_gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}

int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}

int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
				  FLAGS_RFC4106_GCM);
}

int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}
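
/* Plain GCM setkey: accept only the standard AES key sizes. */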
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
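
/*
 * RFC 4106 setkey: the last 4 bytes of the key material are the nonce
 * (salt) that is prepended to the per-request IV.
 */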
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	memcpy(ctx->nonce, key + keylen, 4);
	ctx->keylen = keylen;

	return 0;
}