/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};
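
/*
 * sgl_ent_len[] and dsgl_ent_len[] map a scatter-gather entry count to the
 * number of bytes those entries occupy in the source/destination descriptors
 * of a work request, so the WR-sizing code below can budget space with a
 * table lookup instead of recomputing the descriptor layout.
 * round_constant[] holds the AES key-schedule rcon values used by
 * get_aes_decrypt_key().
 */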
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
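
/*
 * sg_nents_xlen() - count how many hardware SG entries are needed to cover
 * @reqlen bytes of @sg when one entry can describe at most @entlen bytes,
 * optionally skipping @skip bytes from the start of the list.
 */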
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}
static inline void chcr_handle_aead_resp(struct aead_request *req,
					 unsigned char *input,
					 int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	req->base.complete(&req->base, err);
}
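
/*
 * get_aes_decrypt_key() - derive the decryption key material expected by the
 * hardware: run the standard AES key schedule forward over @key and copy the
 * final round-key words out in reverse order into @dec_key.
 */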
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord followed by SubWord and rcon */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}
static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
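
/*
 * The dsgl_walk_* and ulptx_walk_* helpers below incrementally build the
 * destination (CPL_RX_PHYS_DSGL) and source (ULPTX SGL) scatter-gather
 * descriptors of a work request, one page or scatterlist segment at a time.
 */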
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t *addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(*addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t *addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(*addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}
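
/*
 * chcr_hash_ent_in_wr() and chcr_sg_ent_in_wr() compute how many payload
 * bytes of the source (and destination) scatterlists fit into one work
 * request, given the SGL space still available, using the sgl_ent_len[] and
 * dsgl_ent_len[] tables above.
 */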
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}
static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

	skcipher_request_set_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
				!!lcb, ctx->tx_qidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
						       qid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@ctx: crypto driver context of the request.
 *	@qid: ingress qid where response of this WR should be received.
 *	@op_type:	encryption or decryption
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_blkcipher_req_ctx *reqctx =
		ablkcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(c_ctx(tfm)->dev);

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->info, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}
static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	int err = 0;

	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |=
		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
		CRYPTO_TFM_RES_MASK;
	return err;
}
static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}
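
/*
 * adjust_ctr_overflow() - clamp @bytes so that the 32-bit big-endian counter
 * in the last word of @iv does not wrap within a single work request.  For
 * example, if the counter word is 0xfffffffe, only two more AES blocks can
 * be processed before the wrap, so at most 2 * AES_BLOCK_SIZE bytes are kept.
 */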
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* Number of blocks that can be processed without overflow */
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
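
/*
 * chcr_update_tweak() - recompute the XTS tweak for the next chunk in
 * software: encrypt the original IV with the second half of the key and then
 * apply one GF(2^128) doubling per AES block already processed
 * (gf128mul_x8_ble() advances eight blocks at a time).
 */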
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_cipher *cipher;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	cipher = ablkctx->aes_generic;
	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret)
		goto out;
	crypto_cipher_encrypt_one(cipher, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		crypto_cipher_decrypt_one(cipher, iv, iv);
out:
	return ret;
}
static int chcr_update_cipher_iv(struct ablkcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
					   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/*Updated before sending last WR*/
			memcpy(iv, req->info, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
/* We need a separate function for the final IV because for RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
					   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/*Already updated for Decrypt*/
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb;
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
	int bytes;

	if (err)
		goto unmap;
	if (req->nbytes == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/* CTR mode counter overflow */
		bytes = req->nbytes - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   req->info,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	req->base.complete(&req->base, err);
	return err;
}
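
/*
 * process_cipher() - validate the request, decide between immediate data and
 * DMA scatter-gather, set up the per-mode IV and build the first cipher WR.
 * Anything that does not fit in one WR is continued from
 * chcr_handle_cipher_resp() when the response for the previous WR arrives;
 * a zero-byte budget falls back to the software skcipher.
 */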
static int process_cipher(struct ablkcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;

	reqctx->processed = 0;
	if (!req->info)
		goto error;
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes == 0) ||
	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		goto error;
	}
	chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					     AES_MIN_KEY_SIZE +
					     sizeof(struct cpl_rx_phys_dsgl) +
					     /* Min dsgl size */
					     32))) {
		/* Can be sent as Imm*/
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->nbytes,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->nbytes;
	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->nbytes;
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->info, bytes);
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(reqctx->iv, req->info, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   reqctx->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct sk_buff *skb = NULL;
	int err, isfull = 0;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	int err, isfull = 0;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	struct adapter *adap;
	unsigned int id;
	int txq_perchan, txq_idx, ntxq;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = u_ctx->dev;
		adap = padap(ctx->dev);
		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
				    adap->vres.ncrypto_fc);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
		txq_idx += id % txq_perchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->rx_qidx = rxq_idx;
		ctx->tx_qidx = txq_idx;
		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
		ctx->dev->rx_channel_id = 0;
		spin_unlock(&ctx->dev->lock_chcr_dev);
	}
out:
	return err;
}
static int chcr_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}

	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
		/* To update tweak*/
		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
		if (IS_ERR(ablkctx->aes_generic)) {
			pr_err("failed to allocate aes cipher for tweak\n");
			return PTR_ERR(ablkctx->aes_generic);
		}
	} else {
		ablkctx->aes_generic = NULL;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_rfc3686_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
	 * cannot be used as the fallback in chcr_handle_cipher_resp().
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static void chcr_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
	if (ablkctx->aes_generic)
		crypto_free_cipher(ablkctx->aes_generic);
}
static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}
static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
		crypto_free_shash(base_hash);
}
/**
 *	create_hash_wr - Create hash work request
 *	@req - Cipher req base
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					     param->alg_prm.mk_size, 0,
					     param->opad_needed,
					     ((param->kctx_len +
					       sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to max WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error)
		return -ENOMEM;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
	return error;
}
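
/*
 * create_last_hash_block() - build a final MD-style padding block: 0x80
 * followed by zeroes, with the message length in bits stored big-endian in
 * the last 8 bytes (offset 56 for 64-byte blocks, 120 for 128-byte blocks).
 */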
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_init_hctx_per_wr(req_ctx);
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error)
		return -ENOMEM;

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}

	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
				- req_ctx->reqlen;
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
			       params.sg_len;
	}
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->reqlen = 0;
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
	return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error, isfull = 0;

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error)
		return -ENOMEM;

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
	} else {
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.last = 1;
		params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;
	}
	params.bfr_len = 0;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
	return error;
}
static int chcr_ahash_continue(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
					    HASH_SPACE_LEFT(params.kctx_len),
					    hctx_wr->src_ofst);
	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
		params.sg_len = req->nbytes - hctx_wr->processed;
	if (!hctx_wr->result ||
	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = reqctx->data_len + params.sg_len;
	}
	params.bfr_len = 0;
	reqctx->data_len += params.sg_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	hctx_wr->processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return 0;
err:
	return error;
}
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
					  unsigned char *input,
					  int err)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	int digestsize, updated_digestsize;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));

	if (input == NULL)
		goto out;
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;

	if (hctx_wr->dma_addr) {
		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
				 hctx_wr->dma_len, DMA_TO_DEVICE);
		hctx_wr->dma_addr = 0;
	}
	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
				 req->nbytes)) {
		if (hctx_wr->result == 1) {
			hctx_wr->result = 0;
			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(reqctx->partial_hash,
			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		goto unmap;
	}
	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
	       updated_digestsize);

	err = chcr_ahash_continue(req);
	if (err)
		goto unmap;
	return;
unmap:
	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

out:
	req->base.complete(&req->base, err);
}
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;

	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
					      input, err);
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(state);
	return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(req_ctx);
	return 0;
}
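
/*
 * chcr_ahash_setkey() - precompute the HMAC inner and outer partial hashes.
 * The key is XORed with the ipad/opad patterns and one block of each is
 * hashed; the resulting intermediate states are what the hardware continues
 * from, so the full key does not have to accompany every request.
 */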
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* Use the key to calculate the ipad and opad. The ipad is sent with
	 * the first request's data and the opad with the final hash result;
	 * they are stored in hmacctx->ipad and hmacctx->opad respectively.
	 */
	shash->tfm = hmacctx->base_hash;
	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			       unsigned int key_len)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned short context_size = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
	if (err)
		goto badkey_err;

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	ablkctx->key_ctx_hdr =
		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
				 CHCR_KEYCTX_NO_KEY, 1,
				 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);

	return 0;
}
static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}
2150 static int chcr_hmac_cra_init(struct crypto_tfm
*tfm
)
2152 struct chcr_context
*ctx
= crypto_tfm_ctx(tfm
);
2153 struct hmac_ctx
*hmacctx
= HMAC_CTX(ctx
);
2154 unsigned int digestsize
=
2155 crypto_ahash_digestsize(__crypto_ahash_cast(tfm
));
2157 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
2158 sizeof(struct chcr_ahash_req_ctx
));
2159 hmacctx
->base_hash
= chcr_alloc_shash(digestsize
);
2160 if (IS_ERR(hmacctx
->base_hash
))
2161 return PTR_ERR(hmacctx
->base_hash
);
2162 return chcr_device_init(crypto_tfm_ctx(tfm
));
2165 static void chcr_hmac_cra_exit(struct crypto_tfm
*tfm
)
2167 struct chcr_context
*ctx
= crypto_tfm_ctx(tfm
);
2168 struct hmac_ctx
*hmacctx
= HMAC_CTX(ctx
);
2170 if (hmacctx
->base_hash
) {
2171 chcr_free_shash(hmacctx
->base_hash
);
2172 hmacctx
->base_hash
= NULL
;
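/*
 * Common AEAD request setup/teardown: validate key and length, DMA-map the
 * IV/B0 scratch area and the source/destination lists, and cache the
 * scatterlist entry counts that are later used to size the work request.
 */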
inline void chcr_aead_common_exit(struct aead_request *req)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
}

static int chcr_aead_common_init(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;

	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;
	if (reqctx->op && req->cryptlen < authsize)
		goto err;
	if (reqctx->b0_len)
		reqctx->scratch_pad = reqctx->iv + IV;
	else
		reqctx->scratch_pad = NULL;

	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				  reqctx->op);
	if (error) {
		error = -ENOMEM;
		goto err;
	}
	reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
					  CHCR_SRC_SG_SIZE, 0);
	reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
					  CHCR_SRC_SG_SIZE, req->assoclen);
	return 0;
err:
	return error;
}
static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
				   int aadmax, int wrlen,
				   unsigned short op_type)
{
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
	    dst_nents > MAX_DSGL_ENT ||
	    (req->assoclen > aadmax) ||
	    (wrlen > SGE_MAX_WR_LEN))
		return 1;
	return 0;
}

static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_request *subreq = aead_request_ctx(req);

	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return op_type ? crypto_aead_decrypt(subreq) :
		crypto_aead_encrypt(subreq);
}
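/*
 * Build the AUTHENC (cipher + hash) work request: the WR carries the
 * SEC_CPL, the key context, the destination phys DSGL and either an
 * immediate copy of AAD/IV/payload or a ULPTX source SGL.
 */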
static struct sk_buff *create_authenc_wr(struct aead_request *req,
					 unsigned short qid,
					 int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
	unsigned int kctx_len = 0, dnents;
	unsigned int assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	int null = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	if (req->cryptlen == 0)
		return NULL;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		null = 1;
		assoclen = 0;
		reqctx->aad_nents = 0;
	}
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen +
		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
		req->assoclen);
	dnents += MIN_AUTH_SG; // For IV

	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
		- sizeof(chcr_req->key_ctx);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
		SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
		: (sgl_len(reqctx->src_nents + reqctx->aad_nents
			   + MIN_GCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;

	/*
	 * Input order is AAD, IV and Payload, where the IV is included as
	 * part of the authdata. All other fields are filled according to
	 * the hardware spec.
	 */
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
				       assoclen + 1);
	chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + IV + 1,
					(temp & 0x1F0) >> 4);
	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
					temp & 0xF,
					null ? 0 : assoclen + IV + 1,
					temp, temp);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
	else
		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
					temp,
					actx->auth_mode, aeadctx->hmac_ctrl,
					IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					 0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	if (reqctx->op == CHCR_ENCRYPT_OP ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
		memcpy(chcr_req->key_ctx.key, aeadctx->key,
		       aeadctx->enckey_len);
	else
		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
		       aeadctx->enckey_len);

	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(reqctx->iv, req->iv, IV);
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen);
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
err:
	chcr_aead_common_exit(req);

	return ERR_PTR(error);
}
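/*
 * DMA map/unmap helpers for AEAD requests: the IV (plus the CCM B0 block
 * when present) is mapped as one buffer, and the src/dst scatterlists are
 * mapped bidirectionally when they alias each other.
 */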
int chcr_aead_dma_map(struct device *dev,
		      struct aead_request *req,
		      unsigned short op_type)
{
	int error;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
				-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return 0;
	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, reqctx->iv_dma))
		return -ENOMEM;
	if (reqctx->b0_len)
		reqctx->b0_dma = reqctx->iv_dma + IV;
	else
		reqctx->b0_dma = 0;
	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
	return -ENOMEM;
}
void chcr_aead_dma_unmap(struct device *dev,
			 struct aead_request *req,
			 unsigned short op_type)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int dst_size;

	dst_size = req->assoclen + req->cryptlen + (op_type ?
				-authsize : authsize);
	if (!req->cryptlen || !dst_size)
		return;

	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
			 DMA_BIDIRECTIONAL);
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}
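/*
 * Source/destination SGL builders: when the request fits inline
 * (reqctx->imm) the data is copied directly into the WR, otherwise
 * ULPTX/DSGL entries pointing at the DMA-mapped buffers are emitted.
 */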
void chcr_add_aead_src_ent(struct aead_request *req,
			   struct ulptx_sgl *ulptx,
			   unsigned int assoclen)
{
	struct ulptx_walk ulp_walk;
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	if (reqctx->imm) {
		u8 *buf = (u8 *)ulptx;

		if (reqctx->b0_len) {
			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
			buf += reqctx->b0_len;
		}
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, assoclen, 0);
		buf += assoclen;
		memcpy(buf, reqctx->iv, IV);
		buf += IV;
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, req->cryptlen, req->assoclen);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (reqctx->b0_len)
			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
					    &reqctx->b0_dma);
		ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
		ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
				  req->assoclen);
		ulptx_walk_end(&ulp_walk);
	}
}
void chcr_add_aead_dst_ent(struct aead_request *req,
			   struct cpl_rx_phys_dsgl *phys_cpl,
			   unsigned int assoclen,
			   unsigned short qid)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct dsgl_walk dsgl_walk;
	unsigned int authsize = crypto_aead_authsize(tfm);
	u32 temp;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	if (reqctx->b0_len)
		dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
	dsgl_walk_end(&dsgl_walk, qid);
}
void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
			     void *ulptx,
			     struct cipher_wr_param *wrparam)
{
	struct ulptx_walk ulp_walk;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	u8 *buf = ulptx;

	memcpy(buf, reqctx->iv, IV);
	buf += IV;
	if (reqctx->imm) {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   buf, wrparam->bytes, reqctx->processed);
	} else {
		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
				  reqctx->src_ofst);
		reqctx->srcsg = ulp_walk.last_sg;
		reqctx->src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}
void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
			     struct cpl_rx_phys_dsgl *phys_cpl,
			     struct cipher_wr_param *wrparam,
			     unsigned short qid)
{
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct dsgl_walk dsgl_walk;

	dsgl_walk_init(&dsgl_walk, phys_cpl);
	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
			 reqctx->dst_ofst);
	reqctx->dstsg = dsgl_walk.last_sg;
	reqctx->dst_ofst = dsgl_walk.last_sg_len;

	dsgl_walk_end(&dsgl_walk, qid);
}
void chcr_add_hash_src_ent(struct ahash_request *req,
			   struct ulptx_sgl *ulptx,
			   struct hash_wr_param *param)
{
	struct ulptx_walk ulp_walk;
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);

	if (reqctx->hctx_wr.imm) {
		u8 *buf = (u8 *)ulptx;

		if (param->bfr_len) {
			memcpy(buf, reqctx->reqbfr, param->bfr_len);
			buf += param->bfr_len;
		}

		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
				   sg_nents(reqctx->hctx_wr.srcsg), buf,
				   param->sg_len, 0);
	} else {
		ulptx_walk_init(&ulp_walk, ulptx);
		if (param->bfr_len)
			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
					    &reqctx->hctx_wr.dma_addr);
		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
				  param->sg_len, reqctx->hctx_wr.src_ofst);
		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
		ulptx_walk_end(&ulp_walk);
	}
}
int chcr_hash_dma_map(struct device *dev,
		      struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	int error = 0;

	if (!req->nbytes)
		return 0;
	error = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_TO_DEVICE);
	if (!error)
		return -ENOMEM;
	req_ctx->hctx_wr.is_sg_map = 1;
	return 0;
}
void chcr_hash_dma_unmap(struct device *dev,
			 struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return;

	dma_unmap_sg(dev, req->src, sg_nents(req->src),
		     DMA_TO_DEVICE);
	req_ctx->hctx_wr.is_sg_map = 0;
}
int chcr_cipher_dma_map(struct device *dev,
			struct ablkcipher_request *req)
{
	int error;

	if (req->src == req->dst) {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_BIDIRECTIONAL);
		if (!error)
			goto err;
	} else {
		error = dma_map_sg(dev, req->src, sg_nents(req->src),
				   DMA_TO_DEVICE);
		if (!error)
			goto err;
		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
				   DMA_FROM_DEVICE);
		if (!error) {
			dma_unmap_sg(dev, req->src, sg_nents(req->src),
				     DMA_TO_DEVICE);
			goto err;
		}
	}

	return 0;
err:
	return -ENOMEM;
}
void chcr_cipher_dma_unmap(struct device *dev,
			   struct ablkcipher_request *req)
{
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, sg_nents(req->src),
			     DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_FROM_DEVICE);
	}
}
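/*
 * CCM helpers: set_msg_len() and generate_b0() build the B0 block defined
 * by the CCM specification (flags, nonce and encoded message length) in
 * the per-request scratch pad.
 */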
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
static void generate_b0(struct aead_request *req,
			struct chcr_aead_ctx *aeadctx,
			unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, reqctx->iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);
}
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}
static int ccm_format_packet(struct aead_request *req,
			     struct chcr_aead_ctx *aeadctx,
			     unsigned int sub_type,
			     unsigned short op_type,
			     unsigned int assoclen)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int rc = 0;

	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		reqctx->iv[0] = 3;
		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
		memcpy(reqctx->iv + 4, req->iv, 8);
		memset(reqctx->iv + 12, 0, 4);
	} else {
		memcpy(reqctx->iv, req->iv, 16);
	}
	if (assoclen)
		*((unsigned short *)(reqctx->scratch_pad + 16)) =
			htons(assoclen);

	generate_b0(req, aeadctx, op_type);
	/* zero the ctr value */
	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
	return rc;
}
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
	unsigned int ccm_xtra;
	unsigned char tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(assoclen + IV + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
					 2, assoclen + 1 + ccm_xtra);
	sec_cpl->pldlen =
		htonl(assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will be a b0 always. So AAD start will be 1 always */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					1, assoclen + ccm_xtra, assoclen
					+ IV + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode,
					aeadctx->hmac_ctrl, IV >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
					0, dst_size);
}
static int aead_ccm_validate_input(unsigned short op_type,
				   struct aead_request *req,
				   struct chcr_aead_ctx *aeadctx,
				   unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	return 0;
}
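/*
 * CCM/RFC4309 work-request builder: B0 and the encoded AAD length travel
 * in the scratch pad ahead of the payload, so the extra ccm_xtra bytes are
 * accounted for in every length field of the SEC_CPL above.
 */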
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, kctx_len, dnents, temp;
	unsigned int sub_type, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	sub_type = get_aead_subtype(tfm);
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen -= 8;
	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);

	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
	if (error)
		goto err;
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen
			+ (reqctx->op ? -authsize : authsize),
			CHCR_DST_SG_SIZE, req->assoclen);
	dnents += MIN_CCM_SG; // For IV and B0
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
				     reqctx->b0_len, 16) :
		(sgl_len(reqctx->src_nents + reqctx->aad_nents +
			 MIN_CCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, reqctx->op)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);

	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
	if (error)
		goto dstmap_fail;
	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen);

	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
		reqctx->b0_len) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
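/*
 * GCM/RFC4106 work-request builder: the key context carries the AES key
 * followed by the precomputed GHASH subkey H, and the 16-byte IV is laid
 * out as salt | IV | 0x00000001.
 */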
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len, dnents = 0;
	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	reqctx->b0_len = 0;
	error = chcr_aead_common_init(req);
	if (error)
		return ERR_PTR(error);
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen +
				(reqctx->op ? -authsize : authsize),
				CHCR_DST_SG_SIZE, req->assoclen);
	dnents += MIN_GCM_SG; // For IV
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
		(sgl_len(reqctx->src_nents +
			 reqctx->aad_nents + MIN_GCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, reqctx->op)) {

		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_common_exit(req);
		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	//Offset of tag from end
	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
					a_ctx(tfm)->dev->rx_channel_id, 2,
					(assoclen + 1));
	chcr_req->sec_cpl.pldlen =
		htonl(assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + IV + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
						temp, temp);
	chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 0, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	/* prepare a 16 byte iv */
	/* S   A   L  T |  IV | 0x00000001 */
	if (get_aead_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(reqctx->iv, aeadctx->salt, 4);
		memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
	} else {
		memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
	}
	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);

	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);
	reqctx->skb = skb;
	return skb;

err:
	chcr_aead_common_exit(req);
	return ERR_PTR(error);
}
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				 sizeof(struct aead_request) +
				 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}
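/*
 * setauthsize handlers: map the requested ICV length onto the hardware
 * HMAC_CTRL truncation modes; lengths the hardware cannot produce fall
 * back to software verification (VERIFY_SW).
 */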
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not
	 * true for sha1. authsize == 12 condition should be before
	 * authsize == (maxauth >> 1)
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *) tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
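/*
 * setkey handlers: every key is also programmed into the software fallback
 * AEAD so oversized requests can be handed back to it, then the key
 * context header and key material are prepared for the hardware.
 */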
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}
static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	struct crypto_cipher *cipher;
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		aeadctx->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out1;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);

out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains auth and cipher key both*/
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. We use the authkey to generate h(ipad)
	 * and h(opad), so the authkey is not needed again. authkeylen is the
	 * size of the hash digest.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {

		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		shash->flags = crypto_shash_get_flags(base_hash);
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest*/
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digest to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
							0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		memzero_explicit(&keys, sizeof(keys));
		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct crypto_authenc_keys keys;
	int err;
	/* it contains auth and cipher key both*/
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
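/*
 * Common AEAD submission path: form a work request via the supplied
 * builder and post it to the crypto queue, returning -EINPROGRESS (or
 * -EBUSY when the queue is full but backlogging is allowed).
 */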
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;
	int isfull = 0;

	if (!a_ctx(tfm)->dev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}
	u_ctx = ULD_CTX(a_ctx(tfm));
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);

	if (IS_ERR(skb) || !skb)
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}
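/*
 * Template table of every cipher, hash, HMAC and AEAD algorithm this
 * driver exposes; chcr_register_alg() fills in the common callbacks and
 * registers each entry with the crypto API.
 */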
static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.alg.crypto = {
			.cra_name		= "cbc(aes)",
			.cra_driver_name	= "cbc-aes-chcr",
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_init		= chcr_cra_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_BLOCK_SIZE,
				.setkey		= chcr_aes_cbc_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.alg.crypto = {
			.cra_name		= "xts(aes)",
			.cra_driver_name	= "xts-aes-chcr",
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_init		= chcr_cra_init,
			.cra_u.ablkcipher	= {
				.min_keysize	= 2 * AES_MIN_KEY_SIZE,
				.max_keysize	= 2 * AES_MAX_KEY_SIZE,
				.ivsize		= AES_BLOCK_SIZE,
				.setkey		= chcr_aes_xts_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.alg.crypto = {
			.cra_name		= "ctr(aes)",
			.cra_driver_name	= "ctr-aes-chcr",
			.cra_init		= chcr_cra_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_BLOCK_SIZE,
				.setkey		= chcr_aes_ctr_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.alg.crypto = {
			.cra_name		= "rfc3686(ctr(aes))",
			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
			.cra_init		= chcr_rfc3686_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.ivsize		= CTR_RFC3686_IV_SIZE,
				.setkey		= chcr_aes_rfc3686_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
3841 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_GCM
,
3845 .cra_name
= "gcm(aes)",
3846 .cra_driver_name
= "gcm-aes-chcr",
3848 .cra_priority
= CHCR_AEAD_PRIORITY
,
3849 .cra_ctxsize
= sizeof(struct chcr_context
) +
3850 sizeof(struct chcr_aead_ctx
) +
3851 sizeof(struct chcr_gcm_ctx
),
3853 .ivsize
= GCM_AES_IV_SIZE
,
3854 .maxauthsize
= GHASH_DIGEST_SIZE
,
3855 .setkey
= chcr_gcm_setkey
,
3856 .setauthsize
= chcr_gcm_setauthsize
,
3860 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
,
3864 .cra_name
= "rfc4106(gcm(aes))",
3865 .cra_driver_name
= "rfc4106-gcm-aes-chcr",
3867 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
3868 .cra_ctxsize
= sizeof(struct chcr_context
) +
3869 sizeof(struct chcr_aead_ctx
) +
3870 sizeof(struct chcr_gcm_ctx
),
3873 .ivsize
= GCM_RFC4106_IV_SIZE
,
3874 .maxauthsize
= GHASH_DIGEST_SIZE
,
3875 .setkey
= chcr_gcm_setkey
,
3876 .setauthsize
= chcr_4106_4309_setauthsize
,
3880 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_CCM
,
3884 .cra_name
= "ccm(aes)",
3885 .cra_driver_name
= "ccm-aes-chcr",
3887 .cra_priority
= CHCR_AEAD_PRIORITY
,
3888 .cra_ctxsize
= sizeof(struct chcr_context
) +
3889 sizeof(struct chcr_aead_ctx
),
3892 .ivsize
= AES_BLOCK_SIZE
,
3893 .maxauthsize
= GHASH_DIGEST_SIZE
,
3894 .setkey
= chcr_aead_ccm_setkey
,
3895 .setauthsize
= chcr_ccm_setauthsize
,
3899 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
,
3903 .cra_name
= "rfc4309(ccm(aes))",
3904 .cra_driver_name
= "rfc4309-ccm-aes-chcr",
3906 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
3907 .cra_ctxsize
= sizeof(struct chcr_context
) +
3908 sizeof(struct chcr_aead_ctx
),
3912 .maxauthsize
= GHASH_DIGEST_SIZE
,
3913 .setkey
= chcr_aead_rfc4309_setkey
,
3914 .setauthsize
= chcr_4106_4309_setauthsize
,
3918 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
3922 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
3924 "authenc-hmac-sha1-cbc-aes-chcr",
3925 .cra_blocksize
= AES_BLOCK_SIZE
,
3926 .cra_priority
= CHCR_AEAD_PRIORITY
,
3927 .cra_ctxsize
= sizeof(struct chcr_context
) +
3928 sizeof(struct chcr_aead_ctx
) +
3929 sizeof(struct chcr_authenc_ctx
),
3932 .ivsize
= AES_BLOCK_SIZE
,
3933 .maxauthsize
= SHA1_DIGEST_SIZE
,
3934 .setkey
= chcr_authenc_setkey
,
3935 .setauthsize
= chcr_authenc_setauthsize
,
3939 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
3944 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
3946 "authenc-hmac-sha256-cbc-aes-chcr",
3947 .cra_blocksize
= AES_BLOCK_SIZE
,
3948 .cra_priority
= CHCR_AEAD_PRIORITY
,
3949 .cra_ctxsize
= sizeof(struct chcr_context
) +
3950 sizeof(struct chcr_aead_ctx
) +
3951 sizeof(struct chcr_authenc_ctx
),
3954 .ivsize
= AES_BLOCK_SIZE
,
3955 .maxauthsize
= SHA256_DIGEST_SIZE
,
3956 .setkey
= chcr_authenc_setkey
,
3957 .setauthsize
= chcr_authenc_setauthsize
,
3961 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
3965 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
3967 "authenc-hmac-sha224-cbc-aes-chcr",
3968 .cra_blocksize
= AES_BLOCK_SIZE
,
3969 .cra_priority
= CHCR_AEAD_PRIORITY
,
3970 .cra_ctxsize
= sizeof(struct chcr_context
) +
3971 sizeof(struct chcr_aead_ctx
) +
3972 sizeof(struct chcr_authenc_ctx
),
3974 .ivsize
= AES_BLOCK_SIZE
,
3975 .maxauthsize
= SHA224_DIGEST_SIZE
,
3976 .setkey
= chcr_authenc_setkey
,
3977 .setauthsize
= chcr_authenc_setauthsize
,
3981 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
3985 .cra_name
= "authenc(hmac(sha384),cbc(aes))",
3987 "authenc-hmac-sha384-cbc-aes-chcr",
3988 .cra_blocksize
= AES_BLOCK_SIZE
,
3989 .cra_priority
= CHCR_AEAD_PRIORITY
,
3990 .cra_ctxsize
= sizeof(struct chcr_context
) +
3991 sizeof(struct chcr_aead_ctx
) +
3992 sizeof(struct chcr_authenc_ctx
),
3995 .ivsize
= AES_BLOCK_SIZE
,
3996 .maxauthsize
= SHA384_DIGEST_SIZE
,
3997 .setkey
= chcr_authenc_setkey
,
3998 .setauthsize
= chcr_authenc_setauthsize
,
4002 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
4006 .cra_name
= "authenc(hmac(sha512),cbc(aes))",
4008 "authenc-hmac-sha512-cbc-aes-chcr",
4009 .cra_blocksize
= AES_BLOCK_SIZE
,
4010 .cra_priority
= CHCR_AEAD_PRIORITY
,
4011 .cra_ctxsize
= sizeof(struct chcr_context
) +
4012 sizeof(struct chcr_aead_ctx
) +
4013 sizeof(struct chcr_authenc_ctx
),
4016 .ivsize
= AES_BLOCK_SIZE
,
4017 .maxauthsize
= SHA512_DIGEST_SIZE
,
4018 .setkey
= chcr_authenc_setkey
,
4019 .setauthsize
= chcr_authenc_setauthsize
,
4023 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_NULL
,
4027 .cra_name
= "authenc(digest_null,cbc(aes))",
4029 "authenc-digest_null-cbc-aes-chcr",
4030 .cra_blocksize
= AES_BLOCK_SIZE
,
4031 .cra_priority
= CHCR_AEAD_PRIORITY
,
4032 .cra_ctxsize
= sizeof(struct chcr_context
) +
4033 sizeof(struct chcr_aead_ctx
) +
4034 sizeof(struct chcr_authenc_ctx
),
4037 .ivsize
= AES_BLOCK_SIZE
,
4039 .setkey
= chcr_aead_digest_null_setkey
,
4040 .setauthsize
= chcr_authenc_null_setauthsize
,
4044 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4048 .cra_name
= "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4050 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4052 .cra_priority
= CHCR_AEAD_PRIORITY
,
4053 .cra_ctxsize
= sizeof(struct chcr_context
) +
4054 sizeof(struct chcr_aead_ctx
) +
4055 sizeof(struct chcr_authenc_ctx
),
4058 .ivsize
= CTR_RFC3686_IV_SIZE
,
4059 .maxauthsize
= SHA1_DIGEST_SIZE
,
4060 .setkey
= chcr_authenc_setkey
,
4061 .setauthsize
= chcr_authenc_setauthsize
,
4065 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4070 .cra_name
= "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4072 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4074 .cra_priority
= CHCR_AEAD_PRIORITY
,
4075 .cra_ctxsize
= sizeof(struct chcr_context
) +
4076 sizeof(struct chcr_aead_ctx
) +
4077 sizeof(struct chcr_authenc_ctx
),
4080 .ivsize
= CTR_RFC3686_IV_SIZE
,
4081 .maxauthsize
= SHA256_DIGEST_SIZE
,
4082 .setkey
= chcr_authenc_setkey
,
4083 .setauthsize
= chcr_authenc_setauthsize
,
4087 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4091 .cra_name
= "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4093 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4095 .cra_priority
= CHCR_AEAD_PRIORITY
,
4096 .cra_ctxsize
= sizeof(struct chcr_context
) +
4097 sizeof(struct chcr_aead_ctx
) +
4098 sizeof(struct chcr_authenc_ctx
),
4100 .ivsize
= CTR_RFC3686_IV_SIZE
,
4101 .maxauthsize
= SHA224_DIGEST_SIZE
,
4102 .setkey
= chcr_authenc_setkey
,
4103 .setauthsize
= chcr_authenc_setauthsize
,
4107 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4111 .cra_name
= "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4113 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4115 .cra_priority
= CHCR_AEAD_PRIORITY
,
4116 .cra_ctxsize
= sizeof(struct chcr_context
) +
4117 sizeof(struct chcr_aead_ctx
) +
4118 sizeof(struct chcr_authenc_ctx
),
4121 .ivsize
= CTR_RFC3686_IV_SIZE
,
4122 .maxauthsize
= SHA384_DIGEST_SIZE
,
4123 .setkey
= chcr_authenc_setkey
,
4124 .setauthsize
= chcr_authenc_setauthsize
,
4128 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4132 .cra_name
= "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4134 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4136 .cra_priority
= CHCR_AEAD_PRIORITY
,
4137 .cra_ctxsize
= sizeof(struct chcr_context
) +
4138 sizeof(struct chcr_aead_ctx
) +
4139 sizeof(struct chcr_authenc_ctx
),
4142 .ivsize
= CTR_RFC3686_IV_SIZE
,
4143 .maxauthsize
= SHA512_DIGEST_SIZE
,
4144 .setkey
= chcr_authenc_setkey
,
4145 .setauthsize
= chcr_authenc_setauthsize
,
4149 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_NULL
,
4153 .cra_name
= "authenc(digest_null,rfc3686(ctr(aes)))",
4155 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4157 .cra_priority
= CHCR_AEAD_PRIORITY
,
4158 .cra_ctxsize
= sizeof(struct chcr_context
) +
4159 sizeof(struct chcr_aead_ctx
) +
4160 sizeof(struct chcr_authenc_ctx
),
4163 .ivsize
= CTR_RFC3686_IV_SIZE
,
4165 .setkey
= chcr_aead_digest_null_setkey
,
4166 .setauthsize
= chcr_authenc_null_setauthsize
,
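/*
 * Registration bookkeeping: is_registered tracks which of the entries
 * above were accepted by the crypto API, so unregistration only touches
 * those.
 */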
/*
 *	chcr_unregister_alg - Deregister crypto algorithms with
 *	kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
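
/*
 * SZ_AHASH_REQ_CTX is advertised as halg.statesize in chcr_register_alg()
 * below, so a crypto_ahash_export()/crypto_ahash_import() pair moves the
 * whole chcr_ahash_req_ctx. Callers should therefore size export buffers
 * from crypto_ahash_statesize() rather than a fixed digest-state size.
 * Illustrative sketch only, not driver code; chcr_example_hash_state_size
 * is a hypothetical helper and "sha256" is just an example algorithm name.
 */
static inline int chcr_example_hash_state_size(void)
{
	struct crypto_ahash *tfm;
	int statesize;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Size an export buffer for whichever implementation was selected */
	statesize = crypto_ahash_statesize(tfm);

	crypto_free_ahash(tfm);
	return statesize;
}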
/*
 *	chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		}
		driver_algs[i].is_registered = 1;
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
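
/*
 * Once chcr_register_alg() has run, kernel users reach these
 * implementations through the normal crypto API by cra_name; because
 * CHCR_AEAD_PRIORITY/CHCR_CRA_PRIORITY are set, the allocator prefers the
 * chcr variants over lower-priority software implementations of the same
 * name. Illustrative sketch only, not driver code; chcr_example_alloc_aead
 * is a hypothetical helper.
 */
static inline struct crypto_aead *chcr_example_alloc_aead(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),rfc3686(ctr(aes)))", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	/* The tag length must be fixed before the first request */
	if (crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE)) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}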
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After this
 *	the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}
/*
 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
 *	This should be called once when the last device goes down. After this
 *	the kernel will not call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
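
/*
 * start_crypto()/stop_crypto() are meant to be driven by the device
 * lifecycle: register on the first device, deregister after the last.
 * Below is a minimal sketch of such wiring, assuming a simple device
 * counter; this is not the actual chcr_core.c logic, and the
 * chcr_example_* names and counter are hypothetical.
 */
static atomic_t chcr_example_dev_cnt = ATOMIC_INIT(0);

static inline int chcr_example_device_up(void)
{
	/* First device up: expose the algorithms to the kernel */
	if (atomic_inc_return(&chcr_example_dev_cnt) == 1)
		return start_crypto();
	return 0;
}

static inline void chcr_example_device_down(void)
{
	/* Last device down: withdraw the algorithms again */
	if (atomic_dec_and_test(&chcr_example_dev_cnt))
		stop_crypto();
}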