/*
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req);
static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
	struct aead_request *req = dd->aead_req;

	dd->flags &= ~FLAGS_BUSY;

	req->base.complete(&req->base, ret);
}
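/*
 * Post-DMA completion work: unmap the DMA buffers, copy results back to
 * the caller's scatterlists, and (on decryption) verify the tag. The
 * DMA-out callback leaves a correctly authenticated decryption with an
 * all-zero rctx->auth_tag, so any non-zero byte below means failure.
 */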
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);

	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}
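/*
 * The AES engine DMAs whole 16-byte blocks, so the associated data and
 * the payload are each copied into a block-aligned, zero-padded,
 * single-entry scatterlist via omap_crypto_align_sg() before the
 * transfer is programmed. The output may likewise be bounced through
 * dd->out_sgl when src and dst overlap.
 */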
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	int alen, clen, cryptlen, assoclen, ret;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct scatterlist *tmp, sg_arr[2];
	int nsg;
	u16 flags;

	assoclen = req->assoclen;
	cryptlen = req->cryptlen;

	if (dd->flags & FLAGS_RFC4106_GCM)
		assoclen -= 8;

	if (!(dd->flags & FLAGS_ENCRYPT))
		cryptlen -= authlen;

	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

	nsg = !!(assoclen && cryptlen);

	omap_aes_clear_copy_flags(dd);

	sg_init_table(dd->in_sgl, nsg + 1);
	if (assoclen) {
		tmp = req->src;
		ret = omap_crypto_align_sg(&tmp, assoclen,
					   AES_BLOCK_SIZE, dd->in_sgl,
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_ASSOC_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	if (cryptlen) {
		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

		ret = omap_crypto_align_sg(&tmp, cryptlen,
					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_IN_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg = dd->in_sgl;
	dd->total = cryptlen;
	dd->assoc_len = assoclen;
	dd->authsize = authlen;

	dd->out_sg = req->dst;
	dd->orig_out = req->dst;

	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);

	flags = 0;
	if (req->src == req->dst || dd->out_sg == sg_arr)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
				   AES_BLOCK_SIZE, &dd->out_sgl,
				   flags,
				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

	return 0;
}
static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
{
	struct omap_aes_gcm_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}
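/*
 * GCM defines the final tag as GHASH(...) XOR E(K, J0), where J0 is the
 * 96-bit IV followed by a 32-bit counter of 1 (appended by
 * omap_aes_gcm_crypt() before this is called). The hardware supplies the
 * GHASH half, so E(K, J0) is computed here with the ctx->ctr fallback
 * skcipher and left in the tag buffer for the DMA-out callback to XOR in.
 */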
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct scatterlist iv_sg, tag_sg;
	struct skcipher_request *sk_req;
	struct omap_aes_gcm_result result;
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int ret = 0;

	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
	if (!sk_req) {
		pr_err("skcipher: Failed to allocate request\n");
		return -ENOMEM;
	}

	init_completion(&result.completion);

	sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
	sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
	skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      omap_aes_gcm_complete, &result);
	ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
	skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
				   NULL);
	ret = crypto_skcipher_encrypt(sk_req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret) {
			ret = result.err;
			if (!ret) {
				reinit_completion(&result.completion);
				break;
			}
		}
		/* fall through */
	default:
		pr_err("Encryption of IV failed for GCM mode\n");
		break;
	}

	skcipher_request_free(sk_req);
	return ret;
}
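/*
 * DMA-out completion: read the GHASH result from the TAG registers and
 * XOR in the precomputed E(K, J0) to form the final tag. On decryption
 * the tag carried in the source buffer is XORed in as well, so a valid
 * message leaves rctx->auth_tag all zero for omap_aes_gcm_done_task()
 * to verify.
 */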
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}
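/*
 * Single-engine request queue: new requests are enqueued under dd->lock
 * and only one request owns the hardware (FLAGS_BUSY) at a time.
 * Backlogged requests are notified with -EINPROGRESS before the next
 * request is started.
 */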
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	struct omap_aes_ctx *ctx;
	struct aead_request *backlog;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = aead_enqueue_request(&dd->aead_queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = aead_get_backlog(&dd->aead_queue);
	req = aead_dequeue_request(&dd->aead_queue);
	if (req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!req)
		return ret;

	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	rctx = aead_request_ctx(req);

	dd->ctx = ctx;
	rctx->dd = dd;
	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);
	if (err)
		return err;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);

	if (err) {
		omap_aes_gcm_finish_req(dd, err);
		omap_aes_gcm_handle_queue(dd, NULL);
	}

	return ret;
}
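/*
 * Common entry point for all four GCM variants. For RFC 4106, 8 of the
 * bytes counted by req->assoclen carry the per-request IV rather than
 * authenticated data, so the AAD length is trimmed first. When there is
 * neither AAD nor payload, the tag is simply E(K, J0), which
 * do_encrypt_iv() has already produced, so it is copied straight out.
 */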
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + 12, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}
int omap_aes_gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, 12);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}
int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, 12);
	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}
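/*
 * RFC 4106 nonce layout: the 4-byte salt saved at setkey time, followed
 * by the 8-byte per-request IV, followed by the 32-bit counter appended
 * by omap_aes_gcm_crypt().
 */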
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
				  FLAGS_RFC4106_GCM);
}
int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	/* The last 4 bytes of the RFC 4106 key blob are the nonce salt. */
	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	memcpy(ctx->nonce, key + keylen, 4);
	ctx->keylen = keylen;

	return 0;
}
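
/*
 * These entry points are wired into the "gcm(aes)" and
 * "rfc4106(gcm(aes))" aead_alg definitions registered by omap-aes.c.
 */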