/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req);
static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
	struct aead_request *req = dd->aead_req;

	dd->flags &= ~FLAGS_BUSY;
	dd->in_sg = NULL;
	dd->out_sg = NULL;

	req->base.complete(&req->base, ret);
}
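
/*
 * Post-DMA completion: sync and unmap the DMA buffers, release any
 * bounce buffers set up by omap_aes_gcm_copy_buffers(), and, for
 * decryption, verify the authentication tag (a correct message leaves
 * all-zero tag bytes after the XOR done in the DMA out callback).
 */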
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);

	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}
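
/*
 * Lay out the request for the accelerator: the associated data and the
 * payload are each padded to AES_BLOCK_SIZE and, where needed, copied
 * into block-aligned single-entry scatterlists so they can be handed
 * straight to the DMA engine.
 */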
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	int alen, clen, cryptlen, assoclen, ret;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct scatterlist *tmp, sg_arr[2];
	int nsg;
	u16 flags;

	assoclen = req->assoclen;
	cryptlen = req->cryptlen;

	if (dd->flags & FLAGS_RFC4106_GCM)
		assoclen -= 8;

	if (!(dd->flags & FLAGS_ENCRYPT))
		cryptlen -= authlen;

	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

	nsg = !!(assoclen && cryptlen);

	omap_aes_clear_copy_flags(dd);

	sg_init_table(dd->in_sgl, nsg + 1);
	if (assoclen) {
		tmp = req->src;
		ret = omap_crypto_align_sg(&tmp, assoclen,
					   AES_BLOCK_SIZE, dd->in_sgl,
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_ASSOC_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	if (cryptlen) {
		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

		ret = omap_crypto_align_sg(&tmp, cryptlen,
					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_IN_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg = dd->in_sgl;
	dd->total = cryptlen;
	dd->assoc_len = assoclen;
	dd->authsize = authlen;

	dd->out_sg = req->dst;
	dd->orig_out = req->dst;

	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);

	flags = 0;
	if (req->src == req->dst || dd->out_sg == sg_arr)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
				   AES_BLOCK_SIZE, &dd->out_sgl,
				   flags,
				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

	return 0;
}
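
/* Completion callback for the fallback cipher used by do_encrypt_iv(). */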
static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
{
	struct omap_aes_gcm_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}
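
/*
 * Encrypt the initial counter block J0 = IV || 0x00000001 with the CTR
 * fallback transform.  GCM defines the final tag as E(K, J0) XOR GHASH,
 * so the result stashed in rctx->auth_tag is later XORed with the
 * hardware tag registers in omap_aes_gcm_dma_out_callback().
 */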
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct scatterlist iv_sg, tag_sg;
	struct skcipher_request *sk_req;
	struct omap_aes_gcm_result result;
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int ret = 0;

	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
	if (!sk_req) {
		pr_err("skcipher: Failed to allocate request\n");
		return -ENOMEM;
	}

	init_completion(&result.completion);

	sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
	sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
	skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      omap_aes_gcm_complete, &result);
	ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
	skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
				   NULL);
	ret = crypto_skcipher_encrypt(sk_req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret) {
			ret = result.err;
			if (!ret) {
				reinit_completion(&result.completion);
				break;
			}
		}
		/* fall through */
	default:
		pr_err("Encryption of IV failed for GCM mode\n");
		break;
	}

	skcipher_request_free(sk_req);
	return ret;
}
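
/*
 * DMA completion callback: fold the hardware tag into the final GCM
 * tag.  Each word read from AES_REG_TAG_N is XORed with E(K, J0)
 * (precomputed by do_encrypt_iv()); on decryption it is additionally
 * XORed with the tag received in the source buffer, so a matching
 * message leaves rctx->auth_tag all zero.
 */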
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}
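
/*
 * Enqueue a request and, if the engine is idle, pull the next request
 * off the queue and kick off DMA.  Called with req == NULL from the
 * completion path to start any queued work.
 */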
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	struct omap_aes_ctx *ctx;
	struct aead_request *backlog;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = aead_enqueue_request(&dd->aead_queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = aead_get_backlog(&dd->aead_queue);
	req = aead_dequeue_request(&dd->aead_queue);
	if (req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!req)
		return ret;

	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	rctx = aead_request_ctx(req);

	dd->ctx = ctx;
	rctx->dd = dd;
	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);
	if (err)
		return err;

	err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);

	if (err) {
		omap_aes_gcm_finish_req(dd, err);
		omap_aes_gcm_handle_queue(dd, NULL);
	}

	return ret;
}
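
/*
 * Common entry point for all GCM variants: build J0, precompute
 * E(K, J0), and queue the request.  For a zero-length request the
 * GHASH term is zero, so E(K, J0) is itself the final tag and can be
 * written out directly without touching the hardware.
 */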
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}
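
/*
 * AEAD entry points.  Plain GCM takes the 12-byte IV from the request;
 * the RFC 4106 variants build it from the 4-byte nonce stored with the
 * key followed by the 8-byte per-request IV.
 */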
int omap_aes_gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}
int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
				  FLAGS_RFC4106_GCM);
}
int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}
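
/*
 * An RFC 4106 key blob is the AES key with a 4-byte salt (the nonce)
 * appended, so the rfc4106 setkey strips and stores those trailing
 * four bytes before validating the AES key length.
 */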
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	memcpy(ctx->nonce, key + keylen, 4);
	ctx->keylen = keylen;

	return 0;
}