// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel Keem Bay OCS AES Crypto Driver.
 *
 * Copyright (C) 2018-2020 Intel Corporation
 */

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>

#include "ocs-aes.h"
#define KMB_OCS_PRIORITY	350
#define DRV_NAME		"keembay-ocs-aes"

#define OCS_AES_MIN_KEY_SIZE	16
#define OCS_AES_MAX_KEY_SIZE	32
#define OCS_AES_KEYSIZE_128	16
#define OCS_AES_KEYSIZE_192	24
#define OCS_AES_KEYSIZE_256	32
#define OCS_SM4_KEY_SIZE	16
/**
 * struct ocs_aes_tctx - OCS AES Transform context
 * @engine_ctx:		Engine context.
 * @aes_dev:		The OCS AES device.
 * @key:		AES/SM4 key.
 * @key_len:		The length (in bytes) of @key.
 * @cipher:		OCS cipher to use (either AES or SM4).
 * @sw_cipher:		The cipher to use as fallback.
 * @use_fallback:	Whether or not fallback cipher should be used.
 */
struct ocs_aes_tctx {
	struct crypto_engine_ctx engine_ctx;
	struct ocs_aes_dev *aes_dev;
	u8 key[OCS_AES_KEYSIZE_256];
	unsigned int key_len;
	enum ocs_cipher cipher;
	union {
		struct crypto_sync_skcipher *sk;
		struct crypto_aead *aead;
	} sw_cipher;
	bool use_fallback;
};
/**
 * struct ocs_aes_rctx - OCS AES Request context.
 * @instruction:	Instruction to be executed (encrypt / decrypt).
 * @mode:		Mode to use (ECB, CBC, CTR, CCM, GCM, CTS).
 * @src_nents:		Number of source SG entries.
 * @dst_nents:		Number of destination SG entries.
 * @src_dma_count:	The number of DMA-mapped entries of the source SG.
 * @dst_dma_count:	The number of DMA-mapped entries of the destination SG.
 * @in_place:		Whether or not this is an in place request, i.e.,
 *			req->src == req->dst.
 * @src_dll:		OCS DMA linked list for input data.
 * @dst_dll:		OCS DMA linked list for output data.
 * @last_ct_blk:	Buffer to hold last cipher text block (only used in CBC
 *			decrypt).
 * @cts_swap:		Whether or not CTS swap must be performed.
 * @aad_src_dll:	OCS DMA linked list for input AAD data.
 * @aad_dst_dll:	OCS DMA linked list for output AAD data.
 * @in_tag:		Buffer to hold input encrypted tag (only used for
 *			decrypt).
 * @out_tag:		Buffer to hold output encrypted / decrypted tag (only
 *			used for GCM encrypt / decrypt).
 */
struct ocs_aes_rctx {
	/* Fields common across all modes. */
	enum ocs_instruction instruction;
	enum ocs_mode mode;
	int src_nents;
	int dst_nents;
	int src_dma_count;
	int dst_dma_count;
	bool in_place;
	struct ocs_dll_desc src_dll;
	struct ocs_dll_desc dst_dll;
	u8 last_ct_blk[AES_BLOCK_SIZE];
	bool cts_swap;

	/* CCM/GCM specific */
	struct ocs_dll_desc aad_src_dll;
	struct ocs_dll_desc aad_dst_dll;
	u8 in_tag[AES_BLOCK_SIZE];
	u8 out_tag[AES_BLOCK_SIZE];
};
struct ocs_aes_drv {
	struct list_head dev_list;
	spinlock_t lock;	/* Protects dev_list. */
};

static struct ocs_aes_drv ocs_aes = {
	.dev_list = LIST_HEAD_INIT(ocs_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(ocs_aes.lock),
};
static struct ocs_aes_dev *kmb_ocs_aes_find_dev(struct ocs_aes_tctx *tctx)
{
	struct ocs_aes_dev *aes_dev;

	spin_lock(&ocs_aes.lock);

	if (tctx->aes_dev) {
		aes_dev = tctx->aes_dev;
		goto exit;
	}

	/* Only a single OCS device available */
	aes_dev = list_first_entry(&ocs_aes.dev_list, struct ocs_aes_dev, list);
	tctx->aes_dev = aes_dev;

exit:
	spin_unlock(&ocs_aes.lock);

	return aes_dev;
}
/*
 * Ensure key is 128-bit or 256-bit for AES or 128-bit for SM4 and an actual
 * key is being passed in.
 *
 * Return: 0 if key is valid, -EINVAL otherwise.
 */
static int check_key(const u8 *in_key, size_t key_len, enum ocs_cipher cipher)
{
	if (!in_key)
		return -EINVAL;

	/* For AES, only 128-bit and 256-bit keys are supported. */
	if (cipher == OCS_AES && (key_len == OCS_AES_KEYSIZE_128 ||
				  key_len == OCS_AES_KEYSIZE_256))
		return 0;

	/* For SM4, only 128-bit keys are supported. */
	if (cipher == OCS_SM4 && key_len == OCS_AES_KEYSIZE_128)
		return 0;

	/* Everything else is unsupported. */
	return -EINVAL;
}
/* Save key into transformation context. */
static int save_key(struct ocs_aes_tctx *tctx, const u8 *in_key, size_t key_len,
		    enum ocs_cipher cipher)
{
	int ret;

	ret = check_key(in_key, key_len, cipher);
	if (ret)
		return ret;

	memcpy(tctx->key, in_key, key_len);
	tctx->key_len = key_len;
	tctx->cipher = cipher;

	return 0;
}
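/*
 * Note: AES-192 (OCS_AES_KEYSIZE_192) is deliberately rejected by check_key();
 * the set_key paths below detect 192-bit AES keys and route such transforms to
 * the software fallback cipher instead of calling save_key().
 */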
/* Set key for symmetric cipher. */
static int kmb_ocs_sk_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			      size_t key_len, enum ocs_cipher cipher)
{
	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);

	/* Fallback is used for AES with 192-bit key. */
	tctx->use_fallback = (cipher == OCS_AES &&
			      key_len == OCS_AES_KEYSIZE_192);

	if (!tctx->use_fallback)
		return save_key(tctx, in_key, key_len, cipher);

	crypto_sync_skcipher_clear_flags(tctx->sw_cipher.sk,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(tctx->sw_cipher.sk,
				       tfm->base.crt_flags &
				       CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(tctx->sw_cipher.sk, in_key, key_len);
}
/* Set key for AEAD cipher. */
static int kmb_ocs_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
				size_t key_len, enum ocs_cipher cipher)
{
	struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);

	/* Fallback is used for AES with 192-bit key. */
	tctx->use_fallback = (cipher == OCS_AES &&
			      key_len == OCS_AES_KEYSIZE_192);

	if (!tctx->use_fallback)
		return save_key(tctx, in_key, key_len, cipher);

	crypto_aead_clear_flags(tctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(tctx->sw_cipher.aead,
			      crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);

	return crypto_aead_setkey(tctx->sw_cipher.aead, in_key, key_len);
}
/* Swap two AES blocks in SG lists. */
static void sg_swap_blocks(struct scatterlist *sgl, unsigned int nents,
			   off_t blk1_offset, off_t blk2_offset)
{
	u8 tmp_buf1[AES_BLOCK_SIZE], tmp_buf2[AES_BLOCK_SIZE];

	/*
	 * No easy way to copy within sg list, so copy both blocks to temporary
	 * buffers first.
	 */
	sg_pcopy_to_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk1_offset);
	sg_pcopy_to_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk2_offset);
	sg_pcopy_from_buffer(sgl, nents, tmp_buf1, AES_BLOCK_SIZE, blk2_offset);
	sg_pcopy_from_buffer(sgl, nents, tmp_buf2, AES_BLOCK_SIZE, blk1_offset);
}
/* Initialize request context to default values. */
static void ocs_aes_init_rctx(struct ocs_aes_rctx *rctx)
{
	/* Zero everything. */
	memset(rctx, 0, sizeof(*rctx));

	/* Set initial value for DMA addresses. */
	rctx->src_dll.dma_addr = DMA_MAPPING_ERROR;
	rctx->dst_dll.dma_addr = DMA_MAPPING_ERROR;
	rctx->aad_src_dll.dma_addr = DMA_MAPPING_ERROR;
	rctx->aad_dst_dll.dma_addr = DMA_MAPPING_ERROR;
}
static int kmb_ocs_sk_validate_input(struct skcipher_request *req,
				     enum ocs_mode mode)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	int iv_size = crypto_skcipher_ivsize(tfm);

	switch (mode) {
	case OCS_MODE_ECB:
		/* Ensure input length is multiple of block size */
		if (req->cryptlen % AES_BLOCK_SIZE != 0)
			return -EINVAL;

		return 0;

	case OCS_MODE_CBC:
		/* Ensure input length is multiple of block size */
		if (req->cryptlen % AES_BLOCK_SIZE != 0)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!req->iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;
		/*
		 * NOTE: Since req->cryptlen == 0 case was already handled in
		 * kmb_ocs_sk_common(), the above two conditions also guarantee
		 * that: cryptlen >= iv_size
		 */
		return 0;

	case OCS_MODE_CTR:
		/* Ensure IV is present and block size in length */
		if (!req->iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;
		return 0;

	case OCS_MODE_CTS:
		/* Ensure input length >= block size */
		if (req->cryptlen < AES_BLOCK_SIZE)
			return -EINVAL;

		/* Ensure IV is present and block size in length */
		if (!req->iv || iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	default:
		return -EINVAL;
	}
}
/*
 * Called by encrypt() / decrypt() skcipher functions.
 *
 * Use fallback if needed, otherwise initialize context and enqueue request
 * into engine.
 */
static int kmb_ocs_sk_common(struct skcipher_request *req,
			     enum ocs_cipher cipher,
			     enum ocs_instruction instruction,
			     enum ocs_mode mode)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
	struct ocs_aes_dev *aes_dev;
	int rc;

	if (tctx->use_fallback) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, tctx->sw_cipher.sk);

		skcipher_request_set_sync_tfm(subreq, tctx->sw_cipher.sk);
		skcipher_request_set_callback(subreq, req->base.flags, NULL,
					      NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->cryptlen, req->iv);

		if (instruction == OCS_ENCRYPT)
			rc = crypto_skcipher_encrypt(subreq);
		else
			rc = crypto_skcipher_decrypt(subreq);

		skcipher_request_zero(subreq);

		return rc;
	}

	/*
	 * If cryptlen == 0, no processing needed for ECB, CBC and CTR.
	 *
	 * For CTS continue: kmb_ocs_sk_validate_input() will return -EINVAL.
	 */
	if (!req->cryptlen && mode != OCS_MODE_CTS)
		return 0;

	rc = kmb_ocs_sk_validate_input(req, mode);
	if (rc)
		return rc;

	aes_dev = kmb_ocs_aes_find_dev(tctx);
	if (!aes_dev)
		return -ENODEV;

	if (cipher != tctx->cipher)
		return -EINVAL;

	ocs_aes_init_rctx(rctx);
	rctx->instruction = instruction;
	rctx->mode = mode;

	return crypto_transfer_skcipher_request_to_engine(aes_dev->engine, req);
}
static void cleanup_ocs_dma_linked_list(struct device *dev,
					struct ocs_dll_desc *dll)
{
	if (dll->vaddr)
		dma_free_coherent(dev, dll->size, dll->vaddr, dll->dma_addr);
	dll->vaddr = NULL;
	dll->size = 0;
	dll->dma_addr = DMA_MAPPING_ERROR;
}
static void kmb_ocs_sk_dma_cleanup(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
	struct device *dev = tctx->aes_dev->dev;

	if (rctx->src_dma_count) {
		dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
		rctx->src_dma_count = 0;
	}

	if (rctx->dst_dma_count) {
		dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
							     DMA_BIDIRECTIONAL :
							     DMA_FROM_DEVICE);
		rctx->dst_dma_count = 0;
	}

	/* Clean up OCS DMA linked lists */
	cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
	cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
}
static int kmb_ocs_sk_prepare_inplace(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
	int iv_size = crypto_skcipher_ivsize(tfm);
	int rc;

	/*
	 * For CBC decrypt, save last block (iv) to last_ct_blk buffer.
	 *
	 * Note: if we are here, we already checked that cryptlen >= iv_size
	 * and iv_size == AES_BLOCK_SIZE (i.e., the size of last_ct_blk); see
	 * kmb_ocs_sk_validate_input().
	 */
	if (rctx->mode == OCS_MODE_CBC && rctx->instruction == OCS_DECRYPT)
		scatterwalk_map_and_copy(rctx->last_ct_blk, req->src,
					 req->cryptlen - iv_size, iv_size, 0);

	/* For CTS decrypt, swap last two blocks, if needed. */
	if (rctx->cts_swap && rctx->instruction == OCS_DECRYPT)
		sg_swap_blocks(req->dst, rctx->dst_nents,
			       req->cryptlen - AES_BLOCK_SIZE,
			       req->cryptlen - (2 * AES_BLOCK_SIZE));

	/* src and dst buffers are the same, use bidirectional DMA mapping. */
	rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
					 rctx->dst_nents, DMA_BIDIRECTIONAL);
	if (rctx->dst_dma_count == 0) {
		dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
		return -ENOMEM;
	}

	/* Create DST linked list */
	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
					    rctx->dst_dma_count, &rctx->dst_dll,
					    req->cryptlen, 0);
	if (rc)
		return rc;
	/*
	 * If descriptor creation was successful, set the src_dll.dma_addr to
	 * the value of dst_dll.dma_addr, as we do in-place AES operation on
	 * the src.
	 */
	rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;

	return 0;
}
static int kmb_ocs_sk_prepare_notinplace(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
	int rc;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (rctx->src_nents < 0)
		return -EBADMSG;

	/* Map SRC SG. */
	rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
					 rctx->src_nents, DMA_TO_DEVICE);
	if (rctx->src_dma_count == 0) {
		dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
		return -ENOMEM;
	}

	/* Create SRC linked list */
	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
					    rctx->src_dma_count, &rctx->src_dll,
					    req->cryptlen, 0);
	if (rc)
		return rc;

	/* Map DST SG. */
	rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
					 rctx->dst_nents, DMA_FROM_DEVICE);
	if (rctx->dst_dma_count == 0) {
		dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
		return -ENOMEM;
	}

	/* Create DST linked list */
	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
					    rctx->dst_dma_count, &rctx->dst_dll,
					    req->cryptlen, 0);
	if (rc)
		return rc;

	/* If this is not a CTS decrypt operation with swapping, we are done. */
	if (!(rctx->cts_swap && rctx->instruction == OCS_DECRYPT))
		return 0;

	/*
	 * Otherwise, we have to copy src to dst (as we cannot modify src).
	 * Use OCS AES bypass mode to copy src to dst via DMA.
	 *
	 * NOTE: for anything other than small data sizes this is rather
	 * inefficient.
	 */
	rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->dst_dll.dma_addr,
			       rctx->src_dll.dma_addr, req->cryptlen);
	if (rc)
		return rc;

	/*
	 * Now dst == src, so clean up what we did so far and use in_place
	 * logic.
	 */
	kmb_ocs_sk_dma_cleanup(req);
	rctx->in_place = true;

	return kmb_ocs_sk_prepare_inplace(req);
}
static int kmb_ocs_sk_run(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ocs_aes_rctx *rctx = skcipher_request_ctx(req);
	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
	struct ocs_aes_dev *aes_dev = tctx->aes_dev;
	int iv_size = crypto_skcipher_ivsize(tfm);
	int rc;

	rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (rctx->dst_nents < 0)
		return -EBADMSG;

	/*
	 * If 2 blocks or greater, and multiple of block size, swap last two
	 * blocks to be compatible with other crypto API CTS implementations:
	 * OCS mode uses CBC-CS2, whereas other crypto API implementations use
	 * CBC-CS3.
	 * CBC-CS2 and CBC-CS3 defined by:
	 * https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38a-add.pdf
	 */
	rctx->cts_swap = (rctx->mode == OCS_MODE_CTS &&
			  req->cryptlen > AES_BLOCK_SIZE &&
			  req->cryptlen % AES_BLOCK_SIZE == 0);

	rctx->in_place = (req->src == req->dst);

	if (rctx->in_place)
		rc = kmb_ocs_sk_prepare_inplace(req);
	else
		rc = kmb_ocs_sk_prepare_notinplace(req);
	if (rc)
		goto error;

	rc = ocs_aes_op(aes_dev, rctx->mode, tctx->cipher, rctx->instruction,
			rctx->dst_dll.dma_addr, rctx->src_dll.dma_addr,
			req->cryptlen, req->iv, iv_size);
	if (rc)
		goto error;

	/* Clean-up DMA before further processing output. */
	kmb_ocs_sk_dma_cleanup(req);

	/* For CTS Encrypt, swap last 2 blocks, if needed. */
	if (rctx->cts_swap && rctx->instruction == OCS_ENCRYPT) {
		sg_swap_blocks(req->dst, rctx->dst_nents,
			       req->cryptlen - AES_BLOCK_SIZE,
			       req->cryptlen - (2 * AES_BLOCK_SIZE));
		return 0;
	}

	/* For CBC copy IV to req->IV. */
	if (rctx->mode == OCS_MODE_CBC) {
		/* CBC encrypt case. */
		if (rctx->instruction == OCS_ENCRYPT) {
			scatterwalk_map_and_copy(req->iv, req->dst,
						 req->cryptlen - iv_size,
						 iv_size, 0);
			return 0;
		}
		/* CBC decrypt case. */
		if (rctx->in_place)
			memcpy(req->iv, rctx->last_ct_blk, iv_size);
		else
			scatterwalk_map_and_copy(req->iv, req->src,
						 req->cryptlen - iv_size,
						 iv_size, 0);
		return 0;
	}
	/* For all other modes there's nothing to do. */

	return 0;

error:
	kmb_ocs_sk_dma_cleanup(req);

	return rc;
}
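/*
 * Note on the CTS swap (illustrative, per the NIST SP 800-38A Addendum cited
 * above): CBC-CS2, which the OCS hardware implements, swaps the final two
 * ciphertext blocks only when the message is not block-aligned, while the
 * kernel's cts(cbc(...)) convention is CBC-CS3, which always swaps them. The
 * two variants therefore differ exactly when cryptlen is a multiple of
 * AES_BLOCK_SIZE and larger than one block, which is the rctx->cts_swap
 * condition computed in kmb_ocs_sk_run(); sg_swap_blocks() then re-orders the
 * last two blocks of the output (encrypt) or of the input (decrypt).
 */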
static int kmb_ocs_aead_validate_input(struct aead_request *req,
				       enum ocs_instruction instruction,
				       enum ocs_mode mode)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int tag_size = crypto_aead_authsize(tfm);
	int iv_size = crypto_aead_ivsize(tfm);

	/* For decrypt cryptlen == len(PT) + len(tag). */
	if (instruction == OCS_DECRYPT && req->cryptlen < tag_size)
		return -EINVAL;

	/* IV is mandatory. */
	if (!req->iv)
		return -EINVAL;

	switch (mode) {
	case OCS_MODE_GCM:
		if (iv_size != GCM_AES_IV_SIZE)
			return -EINVAL;

		return 0;

	case OCS_MODE_CCM:
		/* Ensure IV is present and block size in length */
		if (iv_size != AES_BLOCK_SIZE)
			return -EINVAL;

		return 0;

	default:
		return -EINVAL;
	}
}
/*
 * Called by encrypt() / decrypt() aead functions.
 *
 * Use fallback if needed, otherwise initialize context and enqueue request
 * into engine.
 */
static int kmb_ocs_aead_common(struct aead_request *req,
			       enum ocs_cipher cipher,
			       enum ocs_instruction instruction,
			       enum ocs_mode mode)
{
	struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct ocs_aes_rctx *rctx = aead_request_ctx(req);
	struct ocs_aes_dev *dd;
	int rc;

	if (tctx->use_fallback) {
		struct aead_request *subreq = aead_request_ctx(req);

		aead_request_set_tfm(subreq, tctx->sw_cipher.aead);
		aead_request_set_callback(subreq, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(subreq, req->src, req->dst,
				       req->cryptlen, req->iv);
		aead_request_set_ad(subreq, req->assoclen);
		rc = crypto_aead_setauthsize(tctx->sw_cipher.aead,
					     crypto_aead_authsize(crypto_aead_reqtfm(req)));
		if (rc)
			return rc;

		return (instruction == OCS_ENCRYPT) ?
		       crypto_aead_encrypt(subreq) :
		       crypto_aead_decrypt(subreq);
	}

	rc = kmb_ocs_aead_validate_input(req, instruction, mode);
	if (rc)
		return rc;

	dd = kmb_ocs_aes_find_dev(tctx);
	if (!dd)
		return -ENODEV;

	if (cipher != tctx->cipher)
		return -EINVAL;

	ocs_aes_init_rctx(rctx);
	rctx->instruction = instruction;
	rctx->mode = mode;

	return crypto_transfer_aead_request_to_engine(dd->engine, req);
}
static void kmb_ocs_aead_dma_cleanup(struct aead_request *req)
{
	struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct ocs_aes_rctx *rctx = aead_request_ctx(req);
	struct device *dev = tctx->aes_dev->dev;

	if (rctx->src_dma_count) {
		dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE);
		rctx->src_dma_count = 0;
	}

	if (rctx->dst_dma_count) {
		dma_unmap_sg(dev, req->dst, rctx->dst_nents, rctx->in_place ?
							     DMA_BIDIRECTIONAL :
							     DMA_FROM_DEVICE);
		rctx->dst_dma_count = 0;
	}

	/* Clean up OCS DMA linked lists */
	cleanup_ocs_dma_linked_list(dev, &rctx->src_dll);
	cleanup_ocs_dma_linked_list(dev, &rctx->dst_dll);
	cleanup_ocs_dma_linked_list(dev, &rctx->aad_src_dll);
	cleanup_ocs_dma_linked_list(dev, &rctx->aad_dst_dll);
}
/**
 * kmb_ocs_aead_dma_prepare() - Do DMA mapping for AEAD processing.
 * @req:		The AEAD request being processed.
 * @src_dll_size:	Where to store the length of the data mapped into the
 *			src_dll OCS DMA list.
 *
 * Do the following:
 * - DMA map req->src and req->dst
 * - Initialize the following OCS DMA linked lists: rctx->src_dll,
 *   rctx->dst_dll, rctx->aad_src_dll and rctx->aad_dst_dll.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int kmb_ocs_aead_dma_prepare(struct aead_request *req, u32 *src_dll_size)
{
	struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct ocs_aes_rctx *rctx = aead_request_ctx(req);
	u32 in_size;	/* The length of the data to be mapped by src_dll. */
	u32 out_size;	/* The length of the data to be mapped by dst_dll. */
	u32 dst_size;	/* The length of the data in dst_sg. */
	int rc;

	/* Get number of entries in input data SG list. */
	rctx->src_nents = sg_nents_for_len(req->src,
					   req->assoclen + req->cryptlen);
	if (rctx->src_nents < 0)
		return -EBADMSG;

	if (rctx->instruction == OCS_DECRYPT) {
		/*
		 * For decrypt:
		 * - src sg list is:		AAD|CT|tag
		 * - dst sg list expects:	AAD|PT
		 *
		 * in_size == len(CT); out_size == len(PT)
		 */

		/* req->cryptlen includes both CT and tag. */
		in_size = req->cryptlen - tag_size;

		/* out_size = PT size == CT size */
		out_size = in_size;

		/* len(dst_sg) == len(AAD) + len(PT) */
		dst_size = req->assoclen + out_size;

		/*
		 * Copy tag from source SG list to 'in_tag' buffer.
		 *
		 * Note: this needs to be done here, before DMA mapping src_sg.
		 */
		sg_pcopy_to_buffer(req->src, rctx->src_nents, rctx->in_tag,
				   tag_size, req->assoclen + in_size);

	} else { /* OCS_ENCRYPT */
		/*
		 * For encrypt:
		 *	src sg list is:		AAD|PT
		 *	dst sg list expects:	AAD|CT|tag
		 */
		/* in_size == len(PT) */
		in_size = req->cryptlen;

		/*
		 * In CCM mode the OCS engine appends the tag to the ciphertext,
		 * but in GCM mode the tag must be read from the tag registers
		 * and appended manually below.
		 */
		out_size = (rctx->mode == OCS_MODE_CCM) ? in_size + tag_size :
							  in_size;
		/* len(dst_sg) == len(AAD) + len(CT) + len(tag) */
		dst_size = req->assoclen + in_size + tag_size;
	}
	*src_dll_size = in_size;

	/* Get number of entries in output data SG list. */
	rctx->dst_nents = sg_nents_for_len(req->dst, dst_size);
	if (rctx->dst_nents < 0)
		return -EBADMSG;

	rctx->in_place = (req->src == req->dst) ? 1 : 0;

	/* Map destination; use bidirectional mapping for in-place case. */
	rctx->dst_dma_count = dma_map_sg(tctx->aes_dev->dev, req->dst,
					 rctx->dst_nents,
					 rctx->in_place ? DMA_BIDIRECTIONAL :
							  DMA_FROM_DEVICE);
	if (rctx->dst_dma_count == 0 && rctx->dst_nents != 0) {
		dev_err(tctx->aes_dev->dev, "Failed to map destination sg\n");
		return -ENOMEM;
	}

	/* Create AAD DST list: maps dst[0:AAD_SIZE-1]. */
	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
					    rctx->dst_dma_count,
					    &rctx->aad_dst_dll, req->assoclen,
					    0);
	if (rc)
		return rc;

	/* Create DST list: maps dst[AAD_SIZE:out_size] */
	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
					    rctx->dst_dma_count, &rctx->dst_dll,
					    out_size, req->assoclen);
	if (rc)
		return rc;

	if (rctx->in_place) {
		/* If this is not CCM encrypt, we are done. */
		if (!(rctx->mode == OCS_MODE_CCM &&
		      rctx->instruction == OCS_ENCRYPT)) {
			/*
			 * SRC and DST are the same, so re-use the same DMA
			 * addresses (to avoid allocating new DMA lists
			 * identical to the dst ones).
			 */
			rctx->src_dll.dma_addr = rctx->dst_dll.dma_addr;
			rctx->aad_src_dll.dma_addr = rctx->aad_dst_dll.dma_addr;

			return 0;
		}
		/*
		 * For CCM encrypt the input and output linked lists contain
		 * different amounts of data, so, we need to create different
		 * SRC and AAD SRC lists, even for the in-place case.
		 */
		rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
						    rctx->dst_dma_count,
						    &rctx->aad_src_dll,
						    req->assoclen, 0);
		if (rc)
			return rc;

		rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->dst,
						    rctx->dst_dma_count,
						    &rctx->src_dll, in_size,
						    req->assoclen);
		if (rc)
			return rc;

		return 0;
	}
	/* Not in-place case. */

	/* Map source SG. */
	rctx->src_dma_count = dma_map_sg(tctx->aes_dev->dev, req->src,
					 rctx->src_nents, DMA_TO_DEVICE);
	if (rctx->src_dma_count == 0 && rctx->src_nents != 0) {
		dev_err(tctx->aes_dev->dev, "Failed to map source sg\n");
		return -ENOMEM;
	}

	/* Create AAD SRC list. */
	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
					    rctx->src_dma_count,
					    &rctx->aad_src_dll,
					    req->assoclen, 0);
	if (rc)
		return rc;

	/* Create SRC list. */
	rc = ocs_create_linked_list_from_sg(tctx->aes_dev, req->src,
					    rctx->src_dma_count,
					    &rctx->src_dll, in_size,
					    req->assoclen);
	if (rc)
		return rc;

	if (req->assoclen == 0)
		return 0;

	/* Copy AAD from src sg to dst sg using OCS DMA. */
	rc = ocs_aes_bypass_op(tctx->aes_dev, rctx->aad_dst_dll.dma_addr,
			       rctx->aad_src_dll.dma_addr, req->cryptlen);
	if (rc)
		dev_err(tctx->aes_dev->dev,
			"Failed to copy source AAD to destination AAD\n");

	return rc;
}
static int kmb_ocs_aead_run(struct aead_request *req)
{
	struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	const int tag_size = crypto_aead_authsize(crypto_aead_reqtfm(req));
	struct ocs_aes_rctx *rctx = aead_request_ctx(req);
	u32 in_size;	/* The length of the data mapped by src_dll. */
	int rc;

	rc = kmb_ocs_aead_dma_prepare(req, &in_size);
	if (rc)
		goto exit;

	/* For CCM, we just call the OCS processing and we are done. */
	if (rctx->mode == OCS_MODE_CCM) {
		rc = ocs_aes_ccm_op(tctx->aes_dev, tctx->cipher,
				    rctx->instruction, rctx->dst_dll.dma_addr,
				    rctx->src_dll.dma_addr, in_size,
				    req->iv,
				    rctx->aad_src_dll.dma_addr, req->assoclen,
				    rctx->in_tag, tag_size);
		goto exit;
	}
	/* GCM case; invoke OCS processing. */
	rc = ocs_aes_gcm_op(tctx->aes_dev, tctx->cipher,
			    rctx->instruction,
			    rctx->dst_dll.dma_addr,
			    rctx->src_dll.dma_addr, in_size,
			    req->iv,
			    rctx->aad_src_dll.dma_addr, req->assoclen,
			    rctx->out_tag, tag_size);
	if (rc)
		goto exit;

	/* For GCM decrypt, we have to compare in_tag with out_tag. */
	if (rctx->instruction == OCS_DECRYPT) {
		rc = memcmp(rctx->in_tag, rctx->out_tag, tag_size) ?
		     -EBADMSG : 0;
		goto exit;
	}

	/* For GCM encrypt, we must manually copy out_tag to DST sg. */

	/* Clean-up must be called before the sg_pcopy_from_buffer() below. */
	kmb_ocs_aead_dma_cleanup(req);

	/* Copy tag to destination sg after AAD and CT. */
	sg_pcopy_from_buffer(req->dst, rctx->dst_nents, rctx->out_tag,
			     tag_size, req->assoclen + req->cryptlen);

	/* Return directly as DMA cleanup already done. */
	return 0;

exit:
	kmb_ocs_aead_dma_cleanup(req);

	return rc;
}
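/*
 * Tag handling summary (from the logic above): for CCM the OCS engine consumes
 * rctx->in_tag and handles the tag itself, so ocs_aes_ccm_op() is all that is
 * needed. For GCM the tag is read back into rctx->out_tag and either compared
 * against rctx->in_tag (decrypt, -EBADMSG on mismatch) or appended to the
 * destination SG after AAD and CT (encrypt).
 */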
static int kmb_ocs_aes_sk_do_one_request(struct crypto_engine *engine,
					 void *areq)
{
	struct skcipher_request *req =
			container_of(areq, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
	int err;

	if (!tctx->aes_dev) {
		err = -ENODEV;
		goto exit;
	}

	err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
			      tctx->cipher);
	if (err)
		goto exit;

	err = kmb_ocs_sk_run(req);

exit:
	crypto_finalize_skcipher_request(engine, req, err);

	return 0;
}
static int kmb_ocs_aes_aead_do_one_request(struct crypto_engine *engine,
					   void *areq)
{
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct ocs_aes_tctx *tctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int err;

	if (!tctx->aes_dev) {
		err = -ENODEV;
		goto exit;
	}

	err = ocs_aes_set_key(tctx->aes_dev, tctx->key_len, tctx->key,
			      tctx->cipher);
	if (err)
		goto exit;

	err = kmb_ocs_aead_run(req);

exit:
	crypto_finalize_aead_request(tctx->aes_dev->engine, req, err);

	return 0;
}
static int kmb_ocs_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_AES);
}

static int kmb_ocs_aes_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
				    unsigned int key_len)
{
	return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_AES);
}

#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
static int kmb_ocs_aes_ecb_encrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_ECB);
}

static int kmb_ocs_aes_ecb_decrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_ECB);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */

static int kmb_ocs_aes_cbc_encrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CBC);
}

static int kmb_ocs_aes_cbc_decrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CBC);
}

static int kmb_ocs_aes_ctr_encrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTR);
}

static int kmb_ocs_aes_ctr_decrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTR);
}

#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
static int kmb_ocs_aes_cts_encrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CTS);
}

static int kmb_ocs_aes_cts_decrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CTS);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */

static int kmb_ocs_aes_gcm_encrypt(struct aead_request *req)
{
	return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_GCM);
}

static int kmb_ocs_aes_gcm_decrypt(struct aead_request *req)
{
	return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_GCM);
}

static int kmb_ocs_aes_ccm_encrypt(struct aead_request *req)
{
	return kmb_ocs_aead_common(req, OCS_AES, OCS_ENCRYPT, OCS_MODE_CCM);
}

static int kmb_ocs_aes_ccm_decrypt(struct aead_request *req)
{
	return kmb_ocs_aead_common(req, OCS_AES, OCS_DECRYPT, OCS_MODE_CCM);
}

static int kmb_ocs_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	return kmb_ocs_sk_set_key(tfm, in_key, key_len, OCS_SM4);
}

static int kmb_ocs_sm4_aead_set_key(struct crypto_aead *tfm, const u8 *in_key,
				    unsigned int key_len)
{
	return kmb_ocs_aead_set_key(tfm, in_key, key_len, OCS_SM4);
}

#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
static int kmb_ocs_sm4_ecb_encrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_ECB);
}

static int kmb_ocs_sm4_ecb_decrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_ECB);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */

static int kmb_ocs_sm4_cbc_encrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CBC);
}

static int kmb_ocs_sm4_cbc_decrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CBC);
}

static int kmb_ocs_sm4_ctr_encrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTR);
}

static int kmb_ocs_sm4_ctr_decrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTR);
}

#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
static int kmb_ocs_sm4_cts_encrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CTS);
}

static int kmb_ocs_sm4_cts_decrypt(struct skcipher_request *req)
{
	return kmb_ocs_sk_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CTS);
}
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */

static int kmb_ocs_sm4_gcm_encrypt(struct aead_request *req)
{
	return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_GCM);
}

static int kmb_ocs_sm4_gcm_decrypt(struct aead_request *req)
{
	return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_GCM);
}

static int kmb_ocs_sm4_ccm_encrypt(struct aead_request *req)
{
	return kmb_ocs_aead_common(req, OCS_SM4, OCS_ENCRYPT, OCS_MODE_CCM);
}

static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req)
{
	return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM);
}
static inline int ocs_common_init(struct ocs_aes_tctx *tctx)
{
	tctx->engine_ctx.op.prepare_request = NULL;
	tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_sk_do_one_request;
	tctx->engine_ctx.op.unprepare_request = NULL;

	return 0;
}
static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
{
	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
	struct crypto_sync_skcipher *blk;

	/* set fallback cipher in case it will be needed */
	blk = crypto_alloc_sync_skcipher(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	tctx->sw_cipher.sk = blk;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));

	return ocs_common_init(tctx);
}

static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm)
{
	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));

	return ocs_common_init(tctx);
}
static inline void clear_key(struct ocs_aes_tctx *tctx)
{
	memzero_explicit(tctx->key, OCS_AES_KEYSIZE_256);

	/* Zero key registers if set */
	if (tctx->aes_dev)
		ocs_aes_set_key(tctx->aes_dev, OCS_AES_KEYSIZE_256,
				tctx->key, OCS_AES);
}

static void ocs_exit_tfm(struct crypto_skcipher *tfm)
{
	struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);

	clear_key(tctx);

	if (tctx->sw_cipher.sk) {
		crypto_free_sync_skcipher(tctx->sw_cipher.sk);
		tctx->sw_cipher.sk = NULL;
	}
}
static inline int ocs_common_aead_init(struct ocs_aes_tctx *tctx)
{
	tctx->engine_ctx.op.prepare_request = NULL;
	tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_aead_do_one_request;
	tctx->engine_ctx.op.unprepare_request = NULL;

	return 0;
}
static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
{
	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
	struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
	struct crypto_aead *blk;

	/* Set fallback cipher in case it will be needed */
	blk = crypto_alloc_aead(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(blk))
		return PTR_ERR(blk);

	tctx->sw_cipher.aead = blk;

	crypto_aead_set_reqsize(tfm,
				max(sizeof(struct ocs_aes_rctx),
				    (sizeof(struct aead_request) +
				     crypto_aead_reqsize(tctx->sw_cipher.aead))));

	return ocs_common_aead_init(tctx);
}
static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm,
					unsigned int authsize)
{
	/* CCM allows even tag lengths from 4 to 16 bytes. */
	if (authsize < 4 || authsize > 16 || authsize % 2 != 0)
		return -EINVAL;

	return 0;
}
static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm,
					unsigned int authsize)
{
	return crypto_gcm_check_authsize(authsize);
}

static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm)
{
	struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);

	crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));

	return ocs_common_aead_init(tctx);
}

static void ocs_aead_cra_exit(struct crypto_aead *tfm)
{
	struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);

	clear_key(tctx);

	if (tctx->sw_cipher.aead) {
		crypto_free_aead(tctx->sw_cipher.aead);
		tctx->sw_cipher.aead = NULL;
	}
}
static struct skcipher_alg algs[] = {
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
	{
		.base.cra_name = "ecb(aes)",
		.base.cra_driver_name = "ecb-aes-keembay-ocs",
		.base.cra_priority = KMB_OCS_PRIORITY,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_alignmask = 0,

		.min_keysize = OCS_AES_MIN_KEY_SIZE,
		.max_keysize = OCS_AES_MAX_KEY_SIZE,
		.setkey = kmb_ocs_aes_set_key,
		.encrypt = kmb_ocs_aes_ecb_encrypt,
		.decrypt = kmb_ocs_aes_ecb_decrypt,
		.init = ocs_aes_init_tfm,
		.exit = ocs_exit_tfm,
	},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
	{
		.base.cra_name = "cbc(aes)",
		.base.cra_driver_name = "cbc-aes-keembay-ocs",
		.base.cra_priority = KMB_OCS_PRIORITY,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_alignmask = 0,

		.min_keysize = OCS_AES_MIN_KEY_SIZE,
		.max_keysize = OCS_AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = kmb_ocs_aes_set_key,
		.encrypt = kmb_ocs_aes_cbc_encrypt,
		.decrypt = kmb_ocs_aes_cbc_decrypt,
		.init = ocs_aes_init_tfm,
		.exit = ocs_exit_tfm,
	},
	{
		.base.cra_name = "ctr(aes)",
		.base.cra_driver_name = "ctr-aes-keembay-ocs",
		.base.cra_priority = KMB_OCS_PRIORITY,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = 1,
		.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_alignmask = 0,

		.min_keysize = OCS_AES_MIN_KEY_SIZE,
		.max_keysize = OCS_AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = kmb_ocs_aes_set_key,
		.encrypt = kmb_ocs_aes_ctr_encrypt,
		.decrypt = kmb_ocs_aes_ctr_decrypt,
		.init = ocs_aes_init_tfm,
		.exit = ocs_exit_tfm,
	},
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
	{
		.base.cra_name = "cts(cbc(aes))",
		.base.cra_driver_name = "cts-aes-keembay-ocs",
		.base.cra_priority = KMB_OCS_PRIORITY,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_alignmask = 0,

		.min_keysize = OCS_AES_MIN_KEY_SIZE,
		.max_keysize = OCS_AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = kmb_ocs_aes_set_key,
		.encrypt = kmb_ocs_aes_cts_encrypt,
		.decrypt = kmb_ocs_aes_cts_decrypt,
		.init = ocs_aes_init_tfm,
		.exit = ocs_exit_tfm,
	},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
	{
		.base.cra_name = "ecb(sm4)",
		.base.cra_driver_name = "ecb-sm4-keembay-ocs",
		.base.cra_priority = KMB_OCS_PRIORITY,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_alignmask = 0,

		.min_keysize = OCS_SM4_KEY_SIZE,
		.max_keysize = OCS_SM4_KEY_SIZE,
		.setkey = kmb_ocs_sm4_set_key,
		.encrypt = kmb_ocs_sm4_ecb_encrypt,
		.decrypt = kmb_ocs_sm4_ecb_decrypt,
		.init = ocs_sm4_init_tfm,
		.exit = ocs_exit_tfm,
	},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
	{
		.base.cra_name = "cbc(sm4)",
		.base.cra_driver_name = "cbc-sm4-keembay-ocs",
		.base.cra_priority = KMB_OCS_PRIORITY,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_alignmask = 0,

		.min_keysize = OCS_SM4_KEY_SIZE,
		.max_keysize = OCS_SM4_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = kmb_ocs_sm4_set_key,
		.encrypt = kmb_ocs_sm4_cbc_encrypt,
		.decrypt = kmb_ocs_sm4_cbc_decrypt,
		.init = ocs_sm4_init_tfm,
		.exit = ocs_exit_tfm,
	},
	{
		.base.cra_name = "ctr(sm4)",
		.base.cra_driver_name = "ctr-sm4-keembay-ocs",
		.base.cra_priority = KMB_OCS_PRIORITY,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = 1,
		.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_alignmask = 0,

		.min_keysize = OCS_SM4_KEY_SIZE,
		.max_keysize = OCS_SM4_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = kmb_ocs_sm4_set_key,
		.encrypt = kmb_ocs_sm4_ctr_encrypt,
		.decrypt = kmb_ocs_sm4_ctr_decrypt,
		.init = ocs_sm4_init_tfm,
		.exit = ocs_exit_tfm,
	},
#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
	{
		.base.cra_name = "cts(cbc(sm4))",
		.base.cra_driver_name = "cts-sm4-keembay-ocs",
		.base.cra_priority = KMB_OCS_PRIORITY,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_alignmask = 0,

		.min_keysize = OCS_SM4_KEY_SIZE,
		.max_keysize = OCS_SM4_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = kmb_ocs_sm4_set_key,
		.encrypt = kmb_ocs_sm4_cts_encrypt,
		.decrypt = kmb_ocs_sm4_cts_decrypt,
		.init = ocs_sm4_init_tfm,
		.exit = ocs_exit_tfm,
	},
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
};
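/*
 * Note: CRYPTO_ALG_NEED_FALLBACK is set only on the AES algorithms above,
 * since only 192-bit AES keys are routed to the software fallback; the SM4
 * hardware path covers all supported (128-bit) SM4 keys, so the SM4 entries
 * do not request a fallback.
 */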
static struct aead_alg algs_aead[] = {
	{
		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "gcm-aes-keembay-ocs",
			.cra_priority = KMB_OCS_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct ocs_aes_tctx),
			.cra_module = THIS_MODULE,
		},
		.init = ocs_aes_aead_cra_init,
		.exit = ocs_aead_cra_exit,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setauthsize = kmb_ocs_aead_gcm_setauthsize,
		.setkey = kmb_ocs_aes_aead_set_key,
		.encrypt = kmb_ocs_aes_gcm_encrypt,
		.decrypt = kmb_ocs_aes_gcm_decrypt,
	},
	{
		.base = {
			.cra_name = "ccm(aes)",
			.cra_driver_name = "ccm-aes-keembay-ocs",
			.cra_priority = KMB_OCS_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct ocs_aes_tctx),
			.cra_module = THIS_MODULE,
		},
		.init = ocs_aes_aead_cra_init,
		.exit = ocs_aead_cra_exit,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setauthsize = kmb_ocs_aead_ccm_setauthsize,
		.setkey = kmb_ocs_aes_aead_set_key,
		.encrypt = kmb_ocs_aes_ccm_encrypt,
		.decrypt = kmb_ocs_aes_ccm_decrypt,
	},
	{
		.base = {
			.cra_name = "gcm(sm4)",
			.cra_driver_name = "gcm-sm4-keembay-ocs",
			.cra_priority = KMB_OCS_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct ocs_aes_tctx),
			.cra_module = THIS_MODULE,
		},
		.init = ocs_sm4_aead_cra_init,
		.exit = ocs_aead_cra_exit,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setauthsize = kmb_ocs_aead_gcm_setauthsize,
		.setkey = kmb_ocs_sm4_aead_set_key,
		.encrypt = kmb_ocs_sm4_gcm_encrypt,
		.decrypt = kmb_ocs_sm4_gcm_decrypt,
	},
	{
		.base = {
			.cra_name = "ccm(sm4)",
			.cra_driver_name = "ccm-sm4-keembay-ocs",
			.cra_priority = KMB_OCS_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct ocs_aes_tctx),
			.cra_module = THIS_MODULE,
		},
		.init = ocs_sm4_aead_cra_init,
		.exit = ocs_aead_cra_exit,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,
		.setauthsize = kmb_ocs_aead_ccm_setauthsize,
		.setkey = kmb_ocs_sm4_aead_set_key,
		.encrypt = kmb_ocs_sm4_ccm_encrypt,
		.decrypt = kmb_ocs_sm4_ccm_decrypt,
	},
};
static void unregister_aes_algs(struct ocs_aes_dev *aes_dev)
{
	crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}

static int register_aes_algs(struct ocs_aes_dev *aes_dev)
{
	int ret;

	/*
	 * If any algorithm fails to register, all preceding algorithms that
	 * were successfully registered will be automatically unregistered.
	 */
	ret = crypto_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
	if (ret)
		return ret;

	ret = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
	if (ret)
		crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));

	return ret;
}
/* Device tree driver match. */
static const struct of_device_id kmb_ocs_aes_of_match[] = {
	{
		.compatible = "intel,keembay-ocs-aes",
	},
	{}
};

static int kmb_ocs_aes_remove(struct platform_device *pdev)
{
	struct ocs_aes_dev *aes_dev;

	aes_dev = platform_get_drvdata(pdev);

	unregister_aes_algs(aes_dev);

	spin_lock(&ocs_aes.lock);
	list_del(&aes_dev->list);
	spin_unlock(&ocs_aes.lock);

	crypto_engine_exit(aes_dev->engine);

	return 0;
}
static int kmb_ocs_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ocs_aes_dev *aes_dev;
	struct resource *aes_mem;
	int rc;

	aes_dev = devm_kzalloc(dev, sizeof(*aes_dev), GFP_KERNEL);
	if (!aes_dev)
		return -ENOMEM;

	aes_dev->dev = dev;

	platform_set_drvdata(pdev, aes_dev);

	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(dev, "Failed to set 32 bit dma mask %d\n", rc);
		return rc;
	}

	/* Get base register address. */
	aes_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_mem) {
		dev_err(dev, "Could not retrieve io mem resource\n");
		return -ENODEV;
	}

	aes_dev->base_reg = devm_ioremap_resource(&pdev->dev, aes_mem);
	if (IS_ERR(aes_dev->base_reg)) {
		dev_err(dev, "Failed to get base address\n");
		return PTR_ERR(aes_dev->base_reg);
	}

	/* Get and request IRQ */
	aes_dev->irq = platform_get_irq(pdev, 0);
	if (aes_dev->irq < 0)
		return aes_dev->irq;

	rc = devm_request_threaded_irq(dev, aes_dev->irq, ocs_aes_irq_handler,
				       NULL, 0, "keembay-ocs-aes", aes_dev);
	if (rc < 0) {
		dev_err(dev, "Could not request IRQ\n");
		return rc;
	}

	INIT_LIST_HEAD(&aes_dev->list);
	spin_lock(&ocs_aes.lock);
	list_add_tail(&aes_dev->list, &ocs_aes.dev_list);
	spin_unlock(&ocs_aes.lock);

	init_completion(&aes_dev->irq_completion);

	/* Initialize crypto engine */
	aes_dev->engine = crypto_engine_alloc_init(dev, true);
	if (!aes_dev->engine) {
		rc = -ENOMEM;
		goto list_del;
	}

	rc = crypto_engine_start(aes_dev->engine);
	if (rc) {
		dev_err(dev, "Could not start crypto engine\n");
		goto cleanup;
	}

	rc = register_aes_algs(aes_dev);
	if (rc) {
		dev_err(dev,
			"Could not register OCS algorithms with Crypto API\n");
		goto cleanup;
	}

	return 0;

cleanup:
	crypto_engine_exit(aes_dev->engine);
list_del:
	spin_lock(&ocs_aes.lock);
	list_del(&aes_dev->list);
	spin_unlock(&ocs_aes.lock);

	return rc;
}

/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_aes_driver = {
	.probe = kmb_ocs_aes_probe,
	.remove = kmb_ocs_aes_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = kmb_ocs_aes_of_match,
	},
};

module_platform_driver(kmb_ocs_aes_driver);
MODULE_DESCRIPTION("Intel Keem Bay Offload and Crypto Subsystem (OCS) AES/SM4 Driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_CRYPTO("cbc-aes-keembay-ocs");
MODULE_ALIAS_CRYPTO("ctr-aes-keembay-ocs");
MODULE_ALIAS_CRYPTO("gcm-aes-keembay-ocs");
MODULE_ALIAS_CRYPTO("ccm-aes-keembay-ocs");

MODULE_ALIAS_CRYPTO("cbc-sm4-keembay-ocs");
MODULE_ALIAS_CRYPTO("ctr-sm4-keembay-ocs");
MODULE_ALIAS_CRYPTO("gcm-sm4-keembay-ocs");
MODULE_ALIAS_CRYPTO("ccm-sm4-keembay-ocs");

#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
MODULE_ALIAS_CRYPTO("ecb-aes-keembay-ocs");
MODULE_ALIAS_CRYPTO("ecb-sm4-keembay-ocs");
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */

#ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
MODULE_ALIAS_CRYPTO("cts-aes-keembay-ocs");
MODULE_ALIAS_CRYPTO("cts-sm4-keembay-ocs");
#endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
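/*
 * Usage sketch (illustrative only, not part of this driver): a kernel user can
 * request the OCS implementation explicitly by driver name, e.g.:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc-aes-keembay-ocs", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_skcipher(tfm);
 *
 * Most users simply ask for the generic name (e.g. "cbc(aes)") and the crypto
 * API selects this implementation based on cra_priority (KMB_OCS_PRIORITY).
 */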