/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};
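/*
 * Explanatory note: sgl_ent_len[n] and dsgl_ent_len[n] give the number of
 * bytes a source ULPTX SGL / destination PHYS DSGL occupies in a work
 * request when it carries n scatter-gather entries; they are used below
 * for space accounting when deciding how much payload fits in one WR.
 * round_constant[] holds the AES key-schedule round constants (Rcon) in
 * the most-significant byte.
 */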
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
	return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}
static inline void chcr_handle_aead_resp(struct aead_request *req,
					 unsigned char *input,
					 int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	req->base.complete(&req->base, err);
}

static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		*((u32 *)dec_key + k) = htonl(w_ring[j]);
		j--;
		if (j < 0)
			j += nk;
	}
}
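/*
 * Explanatory note: the loop above runs the normal forward AES key
 * schedule; when it finishes, w_ring[] holds the last nk round-key words,
 * which are copied out most-recent-word-first as the "reverse round key"
 * the hardware starts from for decryption.  For AES-128, for example,
 * nk = 4 and nr = 10, so the schedule produces 44 words and dec_key
 * receives words 43..40.
 */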
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}

static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}
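/*
 * Explanatory note: the helper above hashes exactly one block (the ipad-
 * or opad-XORed key) and exports the raw internal state words, which are
 * later loaded into the hardware so it can continue the digest from that
 * midpoint.  SHA-224 and SHA-384 reuse the SHA-256 and SHA-512 state
 * sizes, hence the "updated" digest sizes used by the callers.
 */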
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);

	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t *addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(*addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}


static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t *addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(*addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		walk->nents++;
		if (!walk->pair_idx)
			walk->pair++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}
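/*
 * Explanatory note: a ULPTX SGL carries its first entry inline in the
 * len0/addr0 fields of struct ulptx_sgl; every further entry goes into
 * the trailing ulptx_sge_pair array, two entries per pair, which is what
 * the pair_idx toggle above implements.  Source entries are split so
 * that none exceeds CHCR_SRC_SG_SIZE bytes.
 */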
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.crypto);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}

static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}
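/*
 * Explanatory note on the space accounting above: "space" is the number
 * of bytes still free in the work request.  The outer loop keeps
 * admitting source data while a source list of (srcsg + 1) entries plus
 * a destination list of dstsg entries would still fit (sgl_ent_len[] /
 * dsgl_ent_len[] give those costs in bytes), and the inner loop grows
 * the destination side the same way.  The byte count actually carried is
 * the smaller of what fits on each side.
 */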
static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				u32 flags,
				struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				u8 *iv,
				unsigned short op_type)
{
	int err;

	SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

	skcipher_request_set_tfm(subreq, cipher);
	skcipher_request_set_callback(subreq, flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, src, dst,
				   nbytes, iv);

	err = op_type ? crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;

}

static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id =
		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
				!!lcb, ctx->tx_qidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
						       qid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));

	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}
/**
 * create_cipher_wr - form the WR for cipher operations
 * @ctx: crypto driver context of the request.
 * @qid: ingress qid where response of this WR should be received.
 * @op_type: encryption or decryption
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_blkcipher_req_ctx *reqctx =
		ablkcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(c_ctx(tfm)->dev);

	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 0, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->info, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	int err = 0;

	crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |=
		crypto_skcipher_get_flags(ablkctx->sw_cipher) &
		CRYPTO_TFM_RES_MASK;
	return err;
}
static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}

static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
	if ((bytes / AES_BLOCK_SIZE) > c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_cipher *cipher;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	cipher = ablkctx->aes_generic;
	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret)
		goto out;
	crypto_cipher_encrypt_one(cipher, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		crypto_cipher_decrypt_one(cipher, iv, iv);
out:
	return ret;
}
static int chcr_update_cipher_iv(struct ablkcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
					   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/*Updated before sending last WR*/
			memcpy(iv, req->info, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;

}

/* We need a separate function for the final IV because with rfc3686 the
 * initial counter starts at 1 and the counter portion of the IV is only
 * 8 bytes, which stays constant across the intermediate update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->info, (reqctx->processed /
					   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/*Already updated for Decrypt*/
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);

	}
	return ret;

}
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
				   unsigned char *input, int err)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct sk_buff *skb;
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct cipher_wr_param wrparam;
	int bytes;

	if (err)
		goto unmap;
	if (req->nbytes == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/*CTR mode counter overflow*/
		bytes = req->nbytes - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   req->info,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	req->base.complete(&req->base, err);
	return err;
}
static int process_cipher(struct ablkcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;

	reqctx->processed = 0;
	if (!req->info)
		goto error;
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes == 0) ||
	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		goto error;
	}
	chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					     AES_MIN_KEY_SIZE +
					     sizeof(struct cpl_rx_phys_dsgl) +
					/*Min dsgl size*/
					     32))) {
		/* Can be sent as Imm*/
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->nbytes,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->nbytes;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->nbytes;
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->info, bytes);
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

	} else {

		memcpy(reqctx->iv, req->info, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   reqctx->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct sk_buff *skb = NULL;
	int err, isfull = 0;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	int err, isfull = 0;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	struct adapter *adap;
	unsigned int id;
	int txq_perchan, txq_idx, ntxq;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = u_ctx->dev;
		adap = padap(ctx->dev);
		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
				    adap->vres.ncrypto_fc);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->tx_chan_id = ctx->dev->tx_channel_id;
		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
		ctx->dev->rx_channel_id = 0;
		spin_unlock(&ctx->dev->lock_chcr_dev);
		rxq_idx = ctx->tx_chan_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		txq_idx = ctx->tx_chan_id * txq_perchan;
		txq_idx += id % txq_perchan;
		ctx->rx_qidx = rxq_idx;
		ctx->tx_qidx = txq_idx;
		/* Channel Id used by SGE to forward packet to Host.
		 * Same value should be used in cpl_fw6_pld RSS_CH field
		 * by FW. Driver programs PCI channel ID to be used in fw
		 * at the time of queue allocation with value "pi->tx_chan"
		 */
		ctx->pci_chan_id = txq_idx / txq_perchan;
	}
out:
	return err;
}
static int chcr_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}

	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
		/* To update tweak*/
		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
		if (IS_ERR(ablkctx->aes_generic)) {
			pr_err("failed to allocate aes cipher for tweak\n");
			return PTR_ERR(ablkctx->aes_generic);
		}
	} else
		ablkctx->aes_generic = NULL;

	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_rfc3686_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
	 * cannot be used as fallback in chcr_handle_cipher_response
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}


static void chcr_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
	if (ablkctx->aes_generic)
		crypto_free_cipher(ablkctx->aes_generic);
}
static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}
/**
 *	create_hash_wr - Create hash work request
 *	@req - Cipher req base
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    ((param->kctx_len +
					     sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to the maximum WR size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error)
		return -ENOMEM;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
	return error;
}
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
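/*
 * Explanatory note: when a final/finup request has no residual data, the
 * block above is a pre-built padding block: a leading 0x80 byte, zeroes,
 * and the total message length in bits (scmd1 << 3) in the last eight
 * bytes of the 64-byte (SHA-1/224/256) or 128-byte (SHA-384/512) block.
 */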
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_init_hctx_per_wr(req_ctx);
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;

	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error)
		return -ENOMEM;

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}

	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
				- req_ctx->reqlen;
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
			       params.sg_len;
	}
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->reqlen = 0;
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
	return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error, isfull = 0;

	rtfm->init(req);
	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error)
		return -ENOMEM;

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
	} else {
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.last = 1;
		params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;

	}
	params.bfr_len = 0;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, 0);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
	return error;
}
static int chcr_ahash_continue(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
					    HASH_SPACE_LEFT(params.kctx_len),
					    hctx_wr->src_ofst);
	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
		params.sg_len = req->nbytes - hctx_wr->processed;
	if (!hctx_wr->result ||
	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = reqctx->data_len + params.sg_len;
	}
	params.bfr_len = 0;
	reqctx->data_len += params.sg_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	hctx_wr->processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return 0;
err:
	return error;
}
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
					  unsigned char *input,
					  int err)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	int digestsize, updated_digestsize;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));

	if (input == NULL)
		goto out;
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;

	if (hctx_wr->dma_addr) {
		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
				 hctx_wr->dma_len, DMA_TO_DEVICE);
		hctx_wr->dma_addr = 0;
	}
	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
				 req->nbytes)) {
		if (hctx_wr->result == 1) {
			hctx_wr->result = 0;
			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(reqctx->partial_hash,
			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);

		}
		goto unmap;
	}
	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
	       updated_digestsize);

	err = chcr_ahash_continue(req);
	if (err)
		goto unmap;
	return;
unmap:
	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);


out:
	req->base.complete(&req->base, err);
}
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;

	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
					      input, err);
		break;

	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(state);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(req_ctx);
	return 0;
}
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* Use the key to calculate the ipad and opad.  The ipad is sent with
	 * the first request's data and the opad with the final hash result;
	 * they are kept in hmacctx->ipad and hmacctx->opad respectively.
	 */
	shash->tfm = hmacctx->base_hash;
	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
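/*
 * Explanatory note: this is the standard HMAC key preparation.  A key
 * longer than the block size is first digested; the (padded) key is then
 * XORed with the repeated 0x36 (ipad) and 0x5c (opad) patterns, and each
 * padded block is run through chcr_compute_partial_hash() so the
 * hardware can resume from the precomputed inner/outer states.
 */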
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			       unsigned int key_len)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned short context_size = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
	if (err)
		goto badkey_err;

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	ablkctx->key_ctx_hdr =
		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
				 CHCR_KEYCTX_NO_KEY, 1,
				 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
badkey_err:
	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);

	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
2135 static int chcr_hmac_init(struct ahash_request
*areq
)
2137 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
2138 struct crypto_ahash
*rtfm
= crypto_ahash_reqtfm(areq
);
2139 struct hmac_ctx
*hmacctx
= HMAC_CTX(h_ctx(rtfm
));
2140 unsigned int digestsize
= crypto_ahash_digestsize(rtfm
);
2141 unsigned int bs
= crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm
));
2143 chcr_sha_init(areq
);
2144 req_ctx
->data_len
= bs
;
2145 if (is_hmac(crypto_ahash_tfm(rtfm
))) {
2146 if (digestsize
== SHA224_DIGEST_SIZE
)
2147 memcpy(req_ctx
->partial_hash
, hmacctx
->ipad
,
2148 SHA256_DIGEST_SIZE
);
2149 else if (digestsize
== SHA384_DIGEST_SIZE
)
2150 memcpy(req_ctx
->partial_hash
, hmacctx
->ipad
,
2151 SHA512_DIGEST_SIZE
);
2153 memcpy(req_ctx
->partial_hash
, hmacctx
->ipad
,
2159 static int chcr_hmac_cra_init(struct crypto_tfm
*tfm
)
2161 struct chcr_context
*ctx
= crypto_tfm_ctx(tfm
);
2162 struct hmac_ctx
*hmacctx
= HMAC_CTX(ctx
);
2163 unsigned int digestsize
=
2164 crypto_ahash_digestsize(__crypto_ahash_cast(tfm
));
2166 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
2167 sizeof(struct chcr_ahash_req_ctx
));
2168 hmacctx
->base_hash
= chcr_alloc_shash(digestsize
);
2169 if (IS_ERR(hmacctx
->base_hash
))
2170 return PTR_ERR(hmacctx
->base_hash
);
2171 return chcr_device_init(crypto_tfm_ctx(tfm
));
2174 static void chcr_hmac_cra_exit(struct crypto_tfm
*tfm
)
2176 struct chcr_context
*ctx
= crypto_tfm_ctx(tfm
);
2177 struct hmac_ctx
*hmacctx
= HMAC_CTX(ctx
);
2179 if (hmacctx
->base_hash
) {
2180 chcr_free_shash(hmacctx
->base_hash
);
2181 hmacctx
->base_hash
= NULL
;
2185 inline void chcr_aead_common_exit(struct aead_request
*req
)
2187 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
2188 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2189 struct uld_ctx
*u_ctx
= ULD_CTX(a_ctx(tfm
));
2191 chcr_aead_dma_unmap(&u_ctx
->lldi
.pdev
->dev
, req
, reqctx
->op
);
2194 static int chcr_aead_common_init(struct aead_request
*req
)
2196 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2197 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
2198 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
2199 unsigned int authsize
= crypto_aead_authsize(tfm
);
2200 int error
= -EINVAL
;
2202 /* validate key size */
2203 if (aeadctx
->enckey_len
== 0)
2205 if (reqctx
->op
&& req
->cryptlen
< authsize
)
2208 reqctx
->scratch_pad
= reqctx
->iv
+ IV
;
2210 reqctx
->scratch_pad
= NULL
;
2212 error
= chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm
))->lldi
.pdev
->dev
, req
,
2218 reqctx
->aad_nents
= sg_nents_xlen(req
->src
, req
->assoclen
,
2219 CHCR_SRC_SG_SIZE
, 0);
2220 reqctx
->src_nents
= sg_nents_xlen(req
->src
, req
->cryptlen
,
2221 CHCR_SRC_SG_SIZE
, req
->assoclen
);
2227 static int chcr_aead_need_fallback(struct aead_request
*req
, int dst_nents
,
2228 int aadmax
, int wrlen
,
2229 unsigned short op_type
)
2231 unsigned int authsize
= crypto_aead_authsize(crypto_aead_reqtfm(req
));
2233 if (((req
->cryptlen
- (op_type
? authsize
: 0)) == 0) ||
2234 dst_nents
> MAX_DSGL_ENT
||
2235 (req
->assoclen
> aadmax
) ||
2236 (wrlen
> SGE_MAX_WR_LEN
))
2241 static int chcr_aead_fallback(struct aead_request
*req
, unsigned short op_type
)
2243 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2244 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
2245 struct aead_request
*subreq
= aead_request_ctx(req
);
2247 aead_request_set_tfm(subreq
, aeadctx
->sw_cipher
);
2248 aead_request_set_callback(subreq
, req
->base
.flags
,
2249 req
->base
.complete
, req
->base
.data
);
2250 aead_request_set_crypt(subreq
, req
->src
, req
->dst
, req
->cryptlen
,
2252 aead_request_set_ad(subreq
, req
->assoclen
);
2253 return op_type
? crypto_aead_decrypt(subreq
) :
2254 crypto_aead_encrypt(subreq
);
2257 static struct sk_buff
*create_authenc_wr(struct aead_request
*req
,
2261 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2262 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
2263 struct chcr_authenc_ctx
*actx
= AUTHENC_CTX(aeadctx
);
2264 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
2265 struct sk_buff
*skb
= NULL
;
2266 struct chcr_wr
*chcr_req
;
2267 struct cpl_rx_phys_dsgl
*phys_cpl
;
2268 struct ulptx_sgl
*ulptx
;
2269 unsigned int transhdr_len
;
2270 unsigned int dst_size
= 0, temp
, subtype
= get_aead_subtype(tfm
);
2271 unsigned int kctx_len
= 0, dnents
;
2272 unsigned int assoclen
= req
->assoclen
;
2273 unsigned int authsize
= crypto_aead_authsize(tfm
);
2274 int error
= -EINVAL
;
2276 gfp_t flags
= req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
2278 struct adapter
*adap
= padap(a_ctx(tfm
)->dev
);
2280 if (req
->cryptlen
== 0)
2284 error
= chcr_aead_common_init(req
);
2286 return ERR_PTR(error
);
2288 if (subtype
== CRYPTO_ALG_SUB_TYPE_CBC_NULL
||
2289 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
) {
2292 reqctx
->aad_nents
= 0;
2294 dnents
= sg_nents_xlen(req
->dst
, assoclen
, CHCR_DST_SG_SIZE
, 0);
2295 dnents
+= sg_nents_xlen(req
->dst
, req
->cryptlen
+
2296 (reqctx
->op
? -authsize
: authsize
), CHCR_DST_SG_SIZE
,
2298 dnents
+= MIN_AUTH_SG
; // For IV
2300 dst_size
= get_space_for_phys_dsgl(dnents
);
2301 kctx_len
= (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx
->key_ctx_hdr
)) << 4)
2302 - sizeof(chcr_req
->key_ctx
);
2303 transhdr_len
= CIPHER_TRANSHDR_SIZE(kctx_len
, dst_size
);
2304 reqctx
->imm
= (transhdr_len
+ assoclen
+ IV
+ req
->cryptlen
) <
2306 temp
= reqctx
->imm
? roundup(assoclen
+ IV
+ req
->cryptlen
, 16)
2307 : (sgl_len(reqctx
->src_nents
+ reqctx
->aad_nents
2309 transhdr_len
+= temp
;
2310 transhdr_len
= roundup(transhdr_len
, 16);
2312 if (chcr_aead_need_fallback(req
, dnents
, T6_MAX_AAD_SIZE
,
2313 transhdr_len
, reqctx
->op
)) {
2314 atomic_inc(&adap
->chcr_stats
.fallback
);
2315 chcr_aead_common_exit(req
);
2316 return ERR_PTR(chcr_aead_fallback(req
, reqctx
->op
));
2318 skb
= alloc_skb(SGE_MAX_WR_LEN
, flags
);
2324 chcr_req
= __skb_put_zero(skb
, transhdr_len
);
2326 temp
= (reqctx
->op
== CHCR_ENCRYPT_OP
) ? 0 : authsize
;
2329 * Input order is AAD,IV and Payload. where IV should be included as
2330 * the part of authdata. All other fields should be filled according
2331 * to the hardware spec
2333 chcr_req
->sec_cpl
.op_ivinsrtofst
=
2334 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm
)->dev
->rx_channel_id
, 2,
2336 chcr_req
->sec_cpl
.pldlen
= htonl(assoclen
+ IV
+ req
->cryptlen
);
2337 chcr_req
->sec_cpl
.aadstart_cipherstop_hi
= FILL_SEC_CPL_CIPHERSTOP_HI(
2338 assoclen
? 1 : 0, assoclen
,
2340 (temp
& 0x1F0) >> 4);
2341 chcr_req
->sec_cpl
.cipherstop_lo_authinsert
= FILL_SEC_CPL_AUTHINSERT(
2343 null
? 0 : assoclen
+ IV
+ 1,
2345 if (subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
||
2346 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_SHA
)
2347 temp
= CHCR_SCMD_CIPHER_MODE_AES_CTR
;
2349 temp
= CHCR_SCMD_CIPHER_MODE_AES_CBC
;
2350 chcr_req
->sec_cpl
.seqno_numivs
= FILL_SEC_CPL_SCMD0_SEQNO(reqctx
->op
,
2351 (reqctx
->op
== CHCR_ENCRYPT_OP
) ? 1 : 0,
2353 actx
->auth_mode
, aeadctx
->hmac_ctrl
,
2355 chcr_req
->sec_cpl
.ivgen_hdrlen
= FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2358 chcr_req
->key_ctx
.ctx_hdr
= aeadctx
->key_ctx_hdr
;
2359 if (reqctx
->op
== CHCR_ENCRYPT_OP
||
2360 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_SHA
||
2361 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
)
2362 memcpy(chcr_req
->key_ctx
.key
, aeadctx
->key
,
2363 aeadctx
->enckey_len
);
2365 memcpy(chcr_req
->key_ctx
.key
, actx
->dec_rrkey
,
2366 aeadctx
->enckey_len
);
2368 memcpy(chcr_req
->key_ctx
.key
+ roundup(aeadctx
->enckey_len
, 16),
2369 actx
->h_iopad
, kctx_len
- roundup(aeadctx
->enckey_len
, 16));
2370 if (subtype
== CRYPTO_ALG_SUB_TYPE_CTR_SHA
||
2371 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
) {
2372 memcpy(reqctx
->iv
, aeadctx
->nonce
, CTR_RFC3686_NONCE_SIZE
);
2373 memcpy(reqctx
->iv
+ CTR_RFC3686_NONCE_SIZE
, req
->iv
,
2374 CTR_RFC3686_IV_SIZE
);
2375 *(__be32
*)(reqctx
->iv
+ CTR_RFC3686_NONCE_SIZE
+
2376 CTR_RFC3686_IV_SIZE
) = cpu_to_be32(1);
2378 memcpy(reqctx
->iv
, req
->iv
, IV
);
2380 phys_cpl
= (struct cpl_rx_phys_dsgl
*)((u8
*)(chcr_req
+ 1) + kctx_len
);
2381 ulptx
= (struct ulptx_sgl
*)((u8
*)(phys_cpl
+ 1) + dst_size
);
2382 chcr_add_aead_dst_ent(req
, phys_cpl
, assoclen
, qid
);
2383 chcr_add_aead_src_ent(req
, ulptx
, assoclen
);
2384 atomic_inc(&adap
->chcr_stats
.cipher_rqst
);
2385 temp
= sizeof(struct cpl_rx_phys_dsgl
) + dst_size
+
2386 kctx_len
+ (reqctx
->imm
? (assoclen
+ IV
+ req
->cryptlen
) : 0);
2387 create_wreq(a_ctx(tfm
), chcr_req
, &req
->base
, reqctx
->imm
, size
,
2388 transhdr_len
, temp
, 0);
2393 chcr_aead_common_exit(req
);
2395 return ERR_PTR(error
);
2398 int chcr_aead_dma_map(struct device
*dev
,
2399 struct aead_request
*req
,
2400 unsigned short op_type
)
2403 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
2404 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2405 unsigned int authsize
= crypto_aead_authsize(tfm
);
2408 dst_size
= req
->assoclen
+ req
->cryptlen
+ (op_type
?
2409 -authsize
: authsize
);
2410 if (!req
->cryptlen
|| !dst_size
)
2412 reqctx
->iv_dma
= dma_map_single(dev
, reqctx
->iv
, (IV
+ reqctx
->b0_len
),
2414 if (dma_mapping_error(dev
, reqctx
->iv_dma
))
2417 reqctx
->b0_dma
= reqctx
->iv_dma
+ IV
;
2420 if (req
->src
== req
->dst
) {
2421 error
= dma_map_sg(dev
, req
->src
, sg_nents(req
->src
),
2426 error
= dma_map_sg(dev
, req
->src
, sg_nents(req
->src
),
2430 error
= dma_map_sg(dev
, req
->dst
, sg_nents(req
->dst
),
2433 dma_unmap_sg(dev
, req
->src
, sg_nents(req
->src
),
2441 dma_unmap_single(dev
, reqctx
->iv_dma
, IV
, DMA_BIDIRECTIONAL
);
2445 void chcr_aead_dma_unmap(struct device
*dev
,
2446 struct aead_request
*req
,
2447 unsigned short op_type
)
2449 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
2450 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2451 unsigned int authsize
= crypto_aead_authsize(tfm
);
2454 dst_size
= req
->assoclen
+ req
->cryptlen
+ (op_type
?
2455 -authsize
: authsize
);
2456 if (!req
->cryptlen
|| !dst_size
)
2459 dma_unmap_single(dev
, reqctx
->iv_dma
, (IV
+ reqctx
->b0_len
),
2461 if (req
->src
== req
->dst
) {
2462 dma_unmap_sg(dev
, req
->src
, sg_nents(req
->src
),
2465 dma_unmap_sg(dev
, req
->src
, sg_nents(req
->src
),
2467 dma_unmap_sg(dev
, req
->dst
, sg_nents(req
->dst
),
2472 void chcr_add_aead_src_ent(struct aead_request
*req
,
2473 struct ulptx_sgl
*ulptx
,
2474 unsigned int assoclen
)
2476 struct ulptx_walk ulp_walk
;
2477 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
2480 u8
*buf
= (u8
*)ulptx
;
2482 if (reqctx
->b0_len
) {
2483 memcpy(buf
, reqctx
->scratch_pad
, reqctx
->b0_len
);
2484 buf
+= reqctx
->b0_len
;
2486 sg_pcopy_to_buffer(req
->src
, sg_nents(req
->src
),
2489 memcpy(buf
, reqctx
->iv
, IV
);
2491 sg_pcopy_to_buffer(req
->src
, sg_nents(req
->src
),
2492 buf
, req
->cryptlen
, req
->assoclen
);
2494 ulptx_walk_init(&ulp_walk
, ulptx
);
2496 ulptx_walk_add_page(&ulp_walk
, reqctx
->b0_len
,
2498 ulptx_walk_add_sg(&ulp_walk
, req
->src
, assoclen
, 0);
2499 ulptx_walk_add_page(&ulp_walk
, IV
, &reqctx
->iv_dma
);
2500 ulptx_walk_add_sg(&ulp_walk
, req
->src
, req
->cryptlen
,
2502 ulptx_walk_end(&ulp_walk
);
2506 void chcr_add_aead_dst_ent(struct aead_request
*req
,
2507 struct cpl_rx_phys_dsgl
*phys_cpl
,
2508 unsigned int assoclen
,
2511 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
2512 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2513 struct dsgl_walk dsgl_walk
;
2514 unsigned int authsize
= crypto_aead_authsize(tfm
);
2515 struct chcr_context
*ctx
= a_ctx(tfm
);
2518 dsgl_walk_init(&dsgl_walk
, phys_cpl
);
2520 dsgl_walk_add_page(&dsgl_walk
, reqctx
->b0_len
, &reqctx
->b0_dma
);
2521 dsgl_walk_add_sg(&dsgl_walk
, req
->dst
, assoclen
, 0);
2522 dsgl_walk_add_page(&dsgl_walk
, IV
, &reqctx
->iv_dma
);
2523 temp
= req
->cryptlen
+ (reqctx
->op
? -authsize
: authsize
);
2524 dsgl_walk_add_sg(&dsgl_walk
, req
->dst
, temp
, req
->assoclen
);
2525 dsgl_walk_end(&dsgl_walk
, qid
, ctx
->pci_chan_id
);
2528 void chcr_add_cipher_src_ent(struct ablkcipher_request
*req
,
2530 struct cipher_wr_param
*wrparam
)
2532 struct ulptx_walk ulp_walk
;
2533 struct chcr_blkcipher_req_ctx
*reqctx
= ablkcipher_request_ctx(req
);
2536 memcpy(buf
, reqctx
->iv
, IV
);
2539 sg_pcopy_to_buffer(req
->src
, sg_nents(req
->src
),
2540 buf
, wrparam
->bytes
, reqctx
->processed
);
2542 ulptx_walk_init(&ulp_walk
, (struct ulptx_sgl
*)buf
);
2543 ulptx_walk_add_sg(&ulp_walk
, reqctx
->srcsg
, wrparam
->bytes
,
2545 reqctx
->srcsg
= ulp_walk
.last_sg
;
2546 reqctx
->src_ofst
= ulp_walk
.last_sg_len
;
2547 ulptx_walk_end(&ulp_walk
);
2551 void chcr_add_cipher_dst_ent(struct ablkcipher_request
*req
,
2552 struct cpl_rx_phys_dsgl
*phys_cpl
,
2553 struct cipher_wr_param
*wrparam
,
2556 struct chcr_blkcipher_req_ctx
*reqctx
= ablkcipher_request_ctx(req
);
2557 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(wrparam
->req
);
2558 struct chcr_context
*ctx
= c_ctx(tfm
);
2559 struct dsgl_walk dsgl_walk
;
2561 dsgl_walk_init(&dsgl_walk
, phys_cpl
);
2562 dsgl_walk_add_sg(&dsgl_walk
, reqctx
->dstsg
, wrparam
->bytes
,
2564 reqctx
->dstsg
= dsgl_walk
.last_sg
;
2565 reqctx
->dst_ofst
= dsgl_walk
.last_sg_len
;
2567 dsgl_walk_end(&dsgl_walk
, qid
, ctx
->pci_chan_id
);
2570 void chcr_add_hash_src_ent(struct ahash_request
*req
,
2571 struct ulptx_sgl
*ulptx
,
2572 struct hash_wr_param
*param
)
2574 struct ulptx_walk ulp_walk
;
2575 struct chcr_ahash_req_ctx
*reqctx
= ahash_request_ctx(req
);
2577 if (reqctx
->hctx_wr
.imm
) {
2578 u8
*buf
= (u8
*)ulptx
;
2580 if (param
->bfr_len
) {
2581 memcpy(buf
, reqctx
->reqbfr
, param
->bfr_len
);
2582 buf
+= param
->bfr_len
;
2585 sg_pcopy_to_buffer(reqctx
->hctx_wr
.srcsg
,
2586 sg_nents(reqctx
->hctx_wr
.srcsg
), buf
,
2589 ulptx_walk_init(&ulp_walk
, ulptx
);
2591 ulptx_walk_add_page(&ulp_walk
, param
->bfr_len
,
2592 &reqctx
->hctx_wr
.dma_addr
);
2593 ulptx_walk_add_sg(&ulp_walk
, reqctx
->hctx_wr
.srcsg
,
2594 param
->sg_len
, reqctx
->hctx_wr
.src_ofst
);
2595 reqctx
->hctx_wr
.srcsg
= ulp_walk
.last_sg
;
2596 reqctx
->hctx_wr
.src_ofst
= ulp_walk
.last_sg_len
;
2597 ulptx_walk_end(&ulp_walk
);
2601 int chcr_hash_dma_map(struct device
*dev
,
2602 struct ahash_request
*req
)
2604 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(req
);
2609 error
= dma_map_sg(dev
, req
->src
, sg_nents(req
->src
),
2613 req_ctx
->hctx_wr
.is_sg_map
= 1;
2617 void chcr_hash_dma_unmap(struct device
*dev
,
2618 struct ahash_request
*req
)
2620 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(req
);
2625 dma_unmap_sg(dev
, req
->src
, sg_nents(req
->src
),
2627 req_ctx
->hctx_wr
.is_sg_map
= 0;
2631 int chcr_cipher_dma_map(struct device
*dev
,
2632 struct ablkcipher_request
*req
)
2636 if (req
->src
== req
->dst
) {
2637 error
= dma_map_sg(dev
, req
->src
, sg_nents(req
->src
),
2642 error
= dma_map_sg(dev
, req
->src
, sg_nents(req
->src
),
2646 error
= dma_map_sg(dev
, req
->dst
, sg_nents(req
->dst
),
2649 dma_unmap_sg(dev
, req
->src
, sg_nents(req
->src
),
2660 void chcr_cipher_dma_unmap(struct device
*dev
,
2661 struct ablkcipher_request
*req
)
2663 if (req
->src
== req
->dst
) {
2664 dma_unmap_sg(dev
, req
->src
, sg_nents(req
->src
),
2667 dma_unmap_sg(dev
, req
->src
, sg_nents(req
->src
),
2669 dma_unmap_sg(dev
, req
->dst
, sg_nents(req
->dst
),
2674 static int set_msg_len(u8
*block
, unsigned int msglen
, int csize
)
2678 memset(block
, 0, csize
);
2683 else if (msglen
> (unsigned int)(1 << (8 * csize
)))
2686 data
= cpu_to_be32(msglen
);
2687 memcpy(block
- csize
, (u8
*)&data
+ 4 - csize
, csize
);
2692 static void generate_b0(struct aead_request
*req
,
2693 struct chcr_aead_ctx
*aeadctx
,
2694 unsigned short op_type
)
2696 unsigned int l
, lp
, m
;
2698 struct crypto_aead
*aead
= crypto_aead_reqtfm(req
);
2699 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
2700 u8
*b0
= reqctx
->scratch_pad
;
2702 m
= crypto_aead_authsize(aead
);
2704 memcpy(b0
, reqctx
->iv
, 16);
2709 /* set m, bits 3-5 */
2710 *b0
|= (8 * ((m
- 2) / 2));
2712 /* set adata, bit 6, if associated data is used */
2715 rc
= set_msg_len(b0
+ 16 - l
,
2716 (op_type
== CHCR_DECRYPT_OP
) ?
2717 req
->cryptlen
- m
: req
->cryptlen
, l
);
2720 static inline int crypto_ccm_check_iv(const u8
*iv
)
2722 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2723 if (iv
[0] < 1 || iv
[0] > 7)
2729 static int ccm_format_packet(struct aead_request
*req
,
2730 struct chcr_aead_ctx
*aeadctx
,
2731 unsigned int sub_type
,
2732 unsigned short op_type
,
2733 unsigned int assoclen
)
2735 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
2738 if (sub_type
== CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
) {
2740 memcpy(reqctx
->iv
+ 1, &aeadctx
->salt
[0], 3);
2741 memcpy(reqctx
->iv
+ 4, req
->iv
, 8);
2742 memset(reqctx
->iv
+ 12, 0, 4);
2744 memcpy(reqctx
->iv
, req
->iv
, 16);
2747 *((unsigned short *)(reqctx
->scratch_pad
+ 16)) =
2750 generate_b0(req
, aeadctx
, op_type
);
2751 /* zero the ctr value */
2752 memset(reqctx
->iv
+ 15 - reqctx
->iv
[0], 0, reqctx
->iv
[0] + 1);
2756 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu
*sec_cpl
,
2757 unsigned int dst_size
,
2758 struct aead_request
*req
,
2759 unsigned short op_type
)
2761 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2762 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
2763 unsigned int cipher_mode
= CHCR_SCMD_CIPHER_MODE_AES_CCM
;
2764 unsigned int mac_mode
= CHCR_SCMD_AUTH_MODE_CBCMAC
;
2765 unsigned int c_id
= a_ctx(tfm
)->dev
->rx_channel_id
;
2766 unsigned int ccm_xtra
;
2767 unsigned int tag_offset
= 0, auth_offset
= 0;
2768 unsigned int assoclen
;
2770 if (get_aead_subtype(tfm
) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
)
2771 assoclen
= req
->assoclen
- 8;
2773 assoclen
= req
->assoclen
;
2774 ccm_xtra
= CCM_B0_SIZE
+
2775 ((assoclen
) ? CCM_AAD_FIELD_SIZE
: 0);
2777 auth_offset
= req
->cryptlen
?
2778 (assoclen
+ IV
+ 1 + ccm_xtra
) : 0;
2779 if (op_type
== CHCR_DECRYPT_OP
) {
2780 if (crypto_aead_authsize(tfm
) != req
->cryptlen
)
2781 tag_offset
= crypto_aead_authsize(tfm
);
2787 sec_cpl
->op_ivinsrtofst
= FILL_SEC_CPL_OP_IVINSR(c_id
,
2788 2, assoclen
+ 1 + ccm_xtra
);
2790 htonl(assoclen
+ IV
+ req
->cryptlen
+ ccm_xtra
);
2791 /* For CCM there wil be b0 always. So AAD start will be 1 always */
2792 sec_cpl
->aadstart_cipherstop_hi
= FILL_SEC_CPL_CIPHERSTOP_HI(
2793 1, assoclen
+ ccm_xtra
, assoclen
2794 + IV
+ 1 + ccm_xtra
, 0);
2796 sec_cpl
->cipherstop_lo_authinsert
= FILL_SEC_CPL_AUTHINSERT(0,
2797 auth_offset
, tag_offset
,
2798 (op_type
== CHCR_ENCRYPT_OP
) ? 0 :
2799 crypto_aead_authsize(tfm
));
2800 sec_cpl
->seqno_numivs
= FILL_SEC_CPL_SCMD0_SEQNO(op_type
,
2801 (op_type
== CHCR_ENCRYPT_OP
) ? 0 : 1,
2802 cipher_mode
, mac_mode
,
2803 aeadctx
->hmac_ctrl
, IV
>> 1);
2805 sec_cpl
->ivgen_hdrlen
= FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2809 static int aead_ccm_validate_input(unsigned short op_type
,
2810 struct aead_request
*req
,
2811 struct chcr_aead_ctx
*aeadctx
,
2812 unsigned int sub_type
)
2814 if (sub_type
!= CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
) {
2815 if (crypto_ccm_check_iv(req
->iv
)) {
2816 pr_err("CCM: IV check fails\n");
2820 if (req
->assoclen
!= 16 && req
->assoclen
!= 20) {
2821 pr_err("RFC4309: Invalid AAD length %d\n",
2829 static struct sk_buff
*create_aead_ccm_wr(struct aead_request
*req
,
2833 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2834 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
2835 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
2836 struct sk_buff
*skb
= NULL
;
2837 struct chcr_wr
*chcr_req
;
2838 struct cpl_rx_phys_dsgl
*phys_cpl
;
2839 struct ulptx_sgl
*ulptx
;
2840 unsigned int transhdr_len
;
2841 unsigned int dst_size
= 0, kctx_len
, dnents
, temp
;
2842 unsigned int sub_type
, assoclen
= req
->assoclen
;
2843 unsigned int authsize
= crypto_aead_authsize(tfm
);
2844 int error
= -EINVAL
;
2845 gfp_t flags
= req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
2847 struct adapter
*adap
= padap(a_ctx(tfm
)->dev
);
2849 sub_type
= get_aead_subtype(tfm
);
2850 if (sub_type
== CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
)
2852 reqctx
->b0_len
= CCM_B0_SIZE
+ (assoclen
? CCM_AAD_FIELD_SIZE
: 0);
2853 error
= chcr_aead_common_init(req
);
2855 return ERR_PTR(error
);
2857 error
= aead_ccm_validate_input(reqctx
->op
, req
, aeadctx
, sub_type
);
2860 dnents
= sg_nents_xlen(req
->dst
, assoclen
, CHCR_DST_SG_SIZE
, 0);
2861 dnents
+= sg_nents_xlen(req
->dst
, req
->cryptlen
2862 + (reqctx
->op
? -authsize
: authsize
),
2863 CHCR_DST_SG_SIZE
, req
->assoclen
);
2864 dnents
+= MIN_CCM_SG
; // For IV and B0
2865 dst_size
= get_space_for_phys_dsgl(dnents
);
2866 kctx_len
= roundup(aeadctx
->enckey_len
, 16) * 2;
2867 transhdr_len
= CIPHER_TRANSHDR_SIZE(kctx_len
, dst_size
);
2868 reqctx
->imm
= (transhdr_len
+ assoclen
+ IV
+ req
->cryptlen
+
2869 reqctx
->b0_len
) <= SGE_MAX_WR_LEN
;
2870 temp
= reqctx
->imm
? roundup(assoclen
+ IV
+ req
->cryptlen
+
2871 reqctx
->b0_len
, 16) :
2872 (sgl_len(reqctx
->src_nents
+ reqctx
->aad_nents
+
2874 transhdr_len
+= temp
;
2875 transhdr_len
= roundup(transhdr_len
, 16);
2877 if (chcr_aead_need_fallback(req
, dnents
, T6_MAX_AAD_SIZE
-
2878 reqctx
->b0_len
, transhdr_len
, reqctx
->op
)) {
2879 atomic_inc(&adap
->chcr_stats
.fallback
);
2880 chcr_aead_common_exit(req
);
2881 return ERR_PTR(chcr_aead_fallback(req
, reqctx
->op
));
2883 skb
= alloc_skb(SGE_MAX_WR_LEN
, flags
);
2890 chcr_req
= (struct chcr_wr
*) __skb_put_zero(skb
, transhdr_len
);
2892 fill_sec_cpl_for_aead(&chcr_req
->sec_cpl
, dst_size
, req
, reqctx
->op
);
2894 chcr_req
->key_ctx
.ctx_hdr
= aeadctx
->key_ctx_hdr
;
2895 memcpy(chcr_req
->key_ctx
.key
, aeadctx
->key
, aeadctx
->enckey_len
);
2896 memcpy(chcr_req
->key_ctx
.key
+ roundup(aeadctx
->enckey_len
, 16),
2897 aeadctx
->key
, aeadctx
->enckey_len
);
2899 phys_cpl
= (struct cpl_rx_phys_dsgl
*)((u8
*)(chcr_req
+ 1) + kctx_len
);
2900 ulptx
= (struct ulptx_sgl
*)((u8
*)(phys_cpl
+ 1) + dst_size
);
2901 error
= ccm_format_packet(req
, aeadctx
, sub_type
, reqctx
->op
, assoclen
);
2904 chcr_add_aead_dst_ent(req
, phys_cpl
, assoclen
, qid
);
2905 chcr_add_aead_src_ent(req
, ulptx
, assoclen
);
2907 atomic_inc(&adap
->chcr_stats
.aead_rqst
);
2908 temp
= sizeof(struct cpl_rx_phys_dsgl
) + dst_size
+
2909 kctx_len
+ (reqctx
->imm
? (assoclen
+ IV
+ req
->cryptlen
+
2910 reqctx
->b0_len
) : 0);
2911 create_wreq(a_ctx(tfm
), chcr_req
, &req
->base
, reqctx
->imm
, 0,
2912 transhdr_len
, temp
, 0);
2919 chcr_aead_common_exit(req
);
2920 return ERR_PTR(error
);
2923 static struct sk_buff
*create_gcm_wr(struct aead_request
*req
,
2927 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2928 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
2929 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
2930 struct sk_buff
*skb
= NULL
;
2931 struct chcr_wr
*chcr_req
;
2932 struct cpl_rx_phys_dsgl
*phys_cpl
;
2933 struct ulptx_sgl
*ulptx
;
2934 unsigned int transhdr_len
, dnents
= 0;
2935 unsigned int dst_size
= 0, temp
= 0, kctx_len
, assoclen
= req
->assoclen
;
2936 unsigned int authsize
= crypto_aead_authsize(tfm
);
2937 int error
= -EINVAL
;
2938 gfp_t flags
= req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
2940 struct adapter
*adap
= padap(a_ctx(tfm
)->dev
);
2942 if (get_aead_subtype(tfm
) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
)
2943 assoclen
= req
->assoclen
- 8;
2946 error
= chcr_aead_common_init(req
);
2948 return ERR_PTR(error
);
2949 dnents
= sg_nents_xlen(req
->dst
, assoclen
, CHCR_DST_SG_SIZE
, 0);
2950 dnents
+= sg_nents_xlen(req
->dst
, req
->cryptlen
+
2951 (reqctx
->op
? -authsize
: authsize
),
2952 CHCR_DST_SG_SIZE
, req
->assoclen
);
2953 dnents
+= MIN_GCM_SG
; // For IV
2954 dst_size
= get_space_for_phys_dsgl(dnents
);
2955 kctx_len
= roundup(aeadctx
->enckey_len
, 16) + AEAD_H_SIZE
;
2956 transhdr_len
= CIPHER_TRANSHDR_SIZE(kctx_len
, dst_size
);
2957 reqctx
->imm
= (transhdr_len
+ assoclen
+ IV
+ req
->cryptlen
) <=
2959 temp
= reqctx
->imm
? roundup(assoclen
+ IV
+ req
->cryptlen
, 16) :
2960 (sgl_len(reqctx
->src_nents
+
2961 reqctx
->aad_nents
+ MIN_GCM_SG
) * 8);
2962 transhdr_len
+= temp
;
2963 transhdr_len
= roundup(transhdr_len
, 16);
2964 if (chcr_aead_need_fallback(req
, dnents
, T6_MAX_AAD_SIZE
,
2965 transhdr_len
, reqctx
->op
)) {
2967 atomic_inc(&adap
->chcr_stats
.fallback
);
2968 chcr_aead_common_exit(req
);
2969 return ERR_PTR(chcr_aead_fallback(req
, reqctx
->op
));
2971 skb
= alloc_skb(SGE_MAX_WR_LEN
, flags
);
2977 chcr_req
= __skb_put_zero(skb
, transhdr_len
);
2979 //Offset of tag from end
2980 temp
= (reqctx
->op
== CHCR_ENCRYPT_OP
) ? 0 : authsize
;
2981 chcr_req
->sec_cpl
.op_ivinsrtofst
= FILL_SEC_CPL_OP_IVINSR(
2982 a_ctx(tfm
)->dev
->rx_channel_id
, 2,
2984 chcr_req
->sec_cpl
.pldlen
=
2985 htonl(assoclen
+ IV
+ req
->cryptlen
);
2986 chcr_req
->sec_cpl
.aadstart_cipherstop_hi
= FILL_SEC_CPL_CIPHERSTOP_HI(
2987 assoclen
? 1 : 0, assoclen
,
2988 assoclen
+ IV
+ 1, 0);
2989 chcr_req
->sec_cpl
.cipherstop_lo_authinsert
=
2990 FILL_SEC_CPL_AUTHINSERT(0, assoclen
+ IV
+ 1,
2992 chcr_req
->sec_cpl
.seqno_numivs
=
2993 FILL_SEC_CPL_SCMD0_SEQNO(reqctx
->op
, (reqctx
->op
==
2994 CHCR_ENCRYPT_OP
) ? 1 : 0,
2995 CHCR_SCMD_CIPHER_MODE_AES_GCM
,
2996 CHCR_SCMD_AUTH_MODE_GHASH
,
2997 aeadctx
->hmac_ctrl
, IV
>> 1);
2998 chcr_req
->sec_cpl
.ivgen_hdrlen
= FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3000 chcr_req
->key_ctx
.ctx_hdr
= aeadctx
->key_ctx_hdr
;
3001 memcpy(chcr_req
->key_ctx
.key
, aeadctx
->key
, aeadctx
->enckey_len
);
3002 memcpy(chcr_req
->key_ctx
.key
+ roundup(aeadctx
->enckey_len
, 16),
3003 GCM_CTX(aeadctx
)->ghash_h
, AEAD_H_SIZE
);
3005 /* prepare a 16 byte iv */
3006 /* S A L T | IV | 0x00000001 */
3007 if (get_aead_subtype(tfm
) ==
3008 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
) {
3009 memcpy(reqctx
->iv
, aeadctx
->salt
, 4);
3010 memcpy(reqctx
->iv
+ 4, req
->iv
, GCM_RFC4106_IV_SIZE
);
3012 memcpy(reqctx
->iv
, req
->iv
, GCM_AES_IV_SIZE
);
3014 *((unsigned int *)(reqctx
->iv
+ 12)) = htonl(0x01);
3016 phys_cpl
= (struct cpl_rx_phys_dsgl
*)((u8
*)(chcr_req
+ 1) + kctx_len
);
3017 ulptx
= (struct ulptx_sgl
*)((u8
*)(phys_cpl
+ 1) + dst_size
);
3019 chcr_add_aead_dst_ent(req
, phys_cpl
, assoclen
, qid
);
3020 chcr_add_aead_src_ent(req
, ulptx
, assoclen
);
3021 atomic_inc(&adap
->chcr_stats
.aead_rqst
);
3022 temp
= sizeof(struct cpl_rx_phys_dsgl
) + dst_size
+
3023 kctx_len
+ (reqctx
->imm
? (assoclen
+ IV
+ req
->cryptlen
) : 0);
3024 create_wreq(a_ctx(tfm
), chcr_req
, &req
->base
, reqctx
->imm
, size
,
3025 transhdr_len
, temp
, reqctx
->verify
);
3030 chcr_aead_common_exit(req
);
3031 return ERR_PTR(error
);
3036 static int chcr_aead_cra_init(struct crypto_aead
*tfm
)
3038 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3039 struct aead_alg
*alg
= crypto_aead_alg(tfm
);
3041 aeadctx
->sw_cipher
= crypto_alloc_aead(alg
->base
.cra_name
, 0,
3042 CRYPTO_ALG_NEED_FALLBACK
|
3044 if (IS_ERR(aeadctx
->sw_cipher
))
3045 return PTR_ERR(aeadctx
->sw_cipher
);
3046 crypto_aead_set_reqsize(tfm
, max(sizeof(struct chcr_aead_reqctx
),
3047 sizeof(struct aead_request
) +
3048 crypto_aead_reqsize(aeadctx
->sw_cipher
)));
3049 return chcr_device_init(a_ctx(tfm
));
3052 static void chcr_aead_cra_exit(struct crypto_aead
*tfm
)
3054 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3056 crypto_free_aead(aeadctx
->sw_cipher
);
3059 static int chcr_authenc_null_setauthsize(struct crypto_aead
*tfm
,
3060 unsigned int authsize
)
3062 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3064 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_NOP
;
3065 aeadctx
->mayverify
= VERIFY_HW
;
3066 return crypto_aead_setauthsize(aeadctx
->sw_cipher
, authsize
);
3068 static int chcr_authenc_setauthsize(struct crypto_aead
*tfm
,
3069 unsigned int authsize
)
3071 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3072 u32 maxauth
= crypto_aead_maxauthsize(tfm
);
3074 /*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not
3075 * true for sha1. authsize == 12 condition should be before
3076 * authsize == (maxauth >> 1)
3078 if (authsize
== ICV_4
) {
3079 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL1
;
3080 aeadctx
->mayverify
= VERIFY_HW
;
3081 } else if (authsize
== ICV_6
) {
3082 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL2
;
3083 aeadctx
->mayverify
= VERIFY_HW
;
3084 } else if (authsize
== ICV_10
) {
3085 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366
;
3086 aeadctx
->mayverify
= VERIFY_HW
;
3087 } else if (authsize
== ICV_12
) {
3088 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT
;
3089 aeadctx
->mayverify
= VERIFY_HW
;
3090 } else if (authsize
== ICV_14
) {
3091 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL3
;
3092 aeadctx
->mayverify
= VERIFY_HW
;
3093 } else if (authsize
== (maxauth
>> 1)) {
3094 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_DIV2
;
3095 aeadctx
->mayverify
= VERIFY_HW
;
3096 } else if (authsize
== maxauth
) {
3097 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_NO_TRUNC
;
3098 aeadctx
->mayverify
= VERIFY_HW
;
3100 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_NO_TRUNC
;
3101 aeadctx
->mayverify
= VERIFY_SW
;
3103 return crypto_aead_setauthsize(aeadctx
->sw_cipher
, authsize
);
3107 static int chcr_gcm_setauthsize(struct crypto_aead
*tfm
, unsigned int authsize
)
3109 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3113 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL1
;
3114 aeadctx
->mayverify
= VERIFY_HW
;
3117 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_DIV2
;
3118 aeadctx
->mayverify
= VERIFY_HW
;
3121 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT
;
3122 aeadctx
->mayverify
= VERIFY_HW
;
3125 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL3
;
3126 aeadctx
->mayverify
= VERIFY_HW
;
3129 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_NO_TRUNC
;
3130 aeadctx
->mayverify
= VERIFY_HW
;
3134 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_NO_TRUNC
;
3135 aeadctx
->mayverify
= VERIFY_SW
;
3140 return crypto_aead_setauthsize(aeadctx
->sw_cipher
, authsize
);
3143 static int chcr_4106_4309_setauthsize(struct crypto_aead
*tfm
,
3144 unsigned int authsize
)
3146 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3150 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_DIV2
;
3151 aeadctx
->mayverify
= VERIFY_HW
;
3154 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT
;
3155 aeadctx
->mayverify
= VERIFY_HW
;
3158 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_NO_TRUNC
;
3159 aeadctx
->mayverify
= VERIFY_HW
;
3164 return crypto_aead_setauthsize(aeadctx
->sw_cipher
, authsize
);
3167 static int chcr_ccm_setauthsize(struct crypto_aead
*tfm
,
3168 unsigned int authsize
)
3170 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3174 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL1
;
3175 aeadctx
->mayverify
= VERIFY_HW
;
3178 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL2
;
3179 aeadctx
->mayverify
= VERIFY_HW
;
3182 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_DIV2
;
3183 aeadctx
->mayverify
= VERIFY_HW
;
3186 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366
;
3187 aeadctx
->mayverify
= VERIFY_HW
;
3190 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT
;
3191 aeadctx
->mayverify
= VERIFY_HW
;
3194 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL3
;
3195 aeadctx
->mayverify
= VERIFY_HW
;
3198 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_NO_TRUNC
;
3199 aeadctx
->mayverify
= VERIFY_HW
;
3204 return crypto_aead_setauthsize(aeadctx
->sw_cipher
, authsize
);
3207 static int chcr_ccm_common_setkey(struct crypto_aead
*aead
,
3209 unsigned int keylen
)
3211 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(aead
));
3212 unsigned char ck_size
, mk_size
;
3213 int key_ctx_size
= 0;
3215 key_ctx_size
= sizeof(struct _key_ctx
) + roundup(keylen
, 16) * 2;
3216 if (keylen
== AES_KEYSIZE_128
) {
3217 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_128
;
3218 mk_size
= CHCR_KEYCTX_MAC_KEY_SIZE_128
;
3219 } else if (keylen
== AES_KEYSIZE_192
) {
3220 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_192
;
3221 mk_size
= CHCR_KEYCTX_MAC_KEY_SIZE_192
;
3222 } else if (keylen
== AES_KEYSIZE_256
) {
3223 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_256
;
3224 mk_size
= CHCR_KEYCTX_MAC_KEY_SIZE_256
;
3226 crypto_aead_set_flags(aead
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
3227 aeadctx
->enckey_len
= 0;
3230 aeadctx
->key_ctx_hdr
= FILL_KEY_CTX_HDR(ck_size
, mk_size
, 0, 0,
3232 memcpy(aeadctx
->key
, key
, keylen
);
3233 aeadctx
->enckey_len
= keylen
;
3238 static int chcr_aead_ccm_setkey(struct crypto_aead
*aead
,
3240 unsigned int keylen
)
3242 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(aead
));
3245 crypto_aead_clear_flags(aeadctx
->sw_cipher
, CRYPTO_TFM_REQ_MASK
);
3246 crypto_aead_set_flags(aeadctx
->sw_cipher
, crypto_aead_get_flags(aead
) &
3247 CRYPTO_TFM_REQ_MASK
);
3248 error
= crypto_aead_setkey(aeadctx
->sw_cipher
, key
, keylen
);
3249 crypto_aead_clear_flags(aead
, CRYPTO_TFM_RES_MASK
);
3250 crypto_aead_set_flags(aead
, crypto_aead_get_flags(aeadctx
->sw_cipher
) &
3251 CRYPTO_TFM_RES_MASK
);
3254 return chcr_ccm_common_setkey(aead
, key
, keylen
);
3257 static int chcr_aead_rfc4309_setkey(struct crypto_aead
*aead
, const u8
*key
,
3258 unsigned int keylen
)
3260 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(aead
));
3264 crypto_aead_set_flags(aead
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
3265 aeadctx
->enckey_len
= 0;
3268 crypto_aead_clear_flags(aeadctx
->sw_cipher
, CRYPTO_TFM_REQ_MASK
);
3269 crypto_aead_set_flags(aeadctx
->sw_cipher
, crypto_aead_get_flags(aead
) &
3270 CRYPTO_TFM_REQ_MASK
);
3271 error
= crypto_aead_setkey(aeadctx
->sw_cipher
, key
, keylen
);
3272 crypto_aead_clear_flags(aead
, CRYPTO_TFM_RES_MASK
);
3273 crypto_aead_set_flags(aead
, crypto_aead_get_flags(aeadctx
->sw_cipher
) &
3274 CRYPTO_TFM_RES_MASK
);
3278 memcpy(aeadctx
->salt
, key
+ keylen
, 3);
3279 return chcr_ccm_common_setkey(aead
, key
, keylen
);
3282 static int chcr_gcm_setkey(struct crypto_aead
*aead
, const u8
*key
,
3283 unsigned int keylen
)
3285 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(aead
));
3286 struct chcr_gcm_ctx
*gctx
= GCM_CTX(aeadctx
);
3287 struct crypto_cipher
*cipher
;
3288 unsigned int ck_size
;
3289 int ret
= 0, key_ctx_size
= 0;
3291 aeadctx
->enckey_len
= 0;
3292 crypto_aead_clear_flags(aeadctx
->sw_cipher
, CRYPTO_TFM_REQ_MASK
);
3293 crypto_aead_set_flags(aeadctx
->sw_cipher
, crypto_aead_get_flags(aead
)
3294 & CRYPTO_TFM_REQ_MASK
);
3295 ret
= crypto_aead_setkey(aeadctx
->sw_cipher
, key
, keylen
);
3296 crypto_aead_clear_flags(aead
, CRYPTO_TFM_RES_MASK
);
3297 crypto_aead_set_flags(aead
, crypto_aead_get_flags(aeadctx
->sw_cipher
) &
3298 CRYPTO_TFM_RES_MASK
);
3302 if (get_aead_subtype(aead
) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
&&
3304 keylen
-= 4; /* nonce/salt is present in the last 4 bytes */
3305 memcpy(aeadctx
->salt
, key
+ keylen
, 4);
3307 if (keylen
== AES_KEYSIZE_128
) {
3308 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_128
;
3309 } else if (keylen
== AES_KEYSIZE_192
) {
3310 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_192
;
3311 } else if (keylen
== AES_KEYSIZE_256
) {
3312 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_256
;
3314 crypto_aead_set_flags(aead
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
3315 pr_err("GCM: Invalid key length %d\n", keylen
);
3320 memcpy(aeadctx
->key
, key
, keylen
);
3321 aeadctx
->enckey_len
= keylen
;
3322 key_ctx_size
= sizeof(struct _key_ctx
) + roundup(keylen
, 16) +
3324 aeadctx
->key_ctx_hdr
= FILL_KEY_CTX_HDR(ck_size
,
3325 CHCR_KEYCTX_MAC_KEY_SIZE_128
,
3328 /* Calculate the H = CIPH(K, 0 repeated 16 times).
3329 * It will go in key context
3331 cipher
= crypto_alloc_cipher("aes-generic", 0, 0);
3332 if (IS_ERR(cipher
)) {
3333 aeadctx
->enckey_len
= 0;
3338 ret
= crypto_cipher_setkey(cipher
, key
, keylen
);
3340 aeadctx
->enckey_len
= 0;
3343 memset(gctx
->ghash_h
, 0, AEAD_H_SIZE
);
3344 crypto_cipher_encrypt_one(cipher
, gctx
->ghash_h
, gctx
->ghash_h
);
3347 crypto_free_cipher(cipher
);
3352 static int chcr_authenc_setkey(struct crypto_aead
*authenc
, const u8
*key
,
3353 unsigned int keylen
)
3355 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(authenc
));
3356 struct chcr_authenc_ctx
*actx
= AUTHENC_CTX(aeadctx
);
3357 /* it contains auth and cipher key both*/
3358 struct crypto_authenc_keys keys
;
3359 unsigned int bs
, subtype
;
3360 unsigned int max_authsize
= crypto_aead_alg(authenc
)->maxauthsize
;
3361 int err
= 0, i
, key_ctx_len
= 0;
3362 unsigned char ck_size
= 0;
3363 unsigned char pad
[CHCR_HASH_MAX_BLOCK_SIZE_128
] = { 0 };
3364 struct crypto_shash
*base_hash
= ERR_PTR(-EINVAL
);
3365 struct algo_param param
;
3369 crypto_aead_clear_flags(aeadctx
->sw_cipher
, CRYPTO_TFM_REQ_MASK
);
3370 crypto_aead_set_flags(aeadctx
->sw_cipher
, crypto_aead_get_flags(authenc
)
3371 & CRYPTO_TFM_REQ_MASK
);
3372 err
= crypto_aead_setkey(aeadctx
->sw_cipher
, key
, keylen
);
3373 crypto_aead_clear_flags(authenc
, CRYPTO_TFM_RES_MASK
);
3374 crypto_aead_set_flags(authenc
, crypto_aead_get_flags(aeadctx
->sw_cipher
)
3375 & CRYPTO_TFM_RES_MASK
);
3379 if (crypto_authenc_extractkeys(&keys
, key
, keylen
) != 0) {
3380 crypto_aead_set_flags(authenc
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
3384 if (get_alg_config(¶m
, max_authsize
)) {
3385 pr_err("chcr : Unsupported digest size\n");
3388 subtype
= get_aead_subtype(authenc
);
3389 if (subtype
== CRYPTO_ALG_SUB_TYPE_CTR_SHA
||
3390 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
) {
3391 if (keys
.enckeylen
< CTR_RFC3686_NONCE_SIZE
)
3393 memcpy(aeadctx
->nonce
, keys
.enckey
+ (keys
.enckeylen
3394 - CTR_RFC3686_NONCE_SIZE
), CTR_RFC3686_NONCE_SIZE
);
3395 keys
.enckeylen
-= CTR_RFC3686_NONCE_SIZE
;
3397 if (keys
.enckeylen
== AES_KEYSIZE_128
) {
3398 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_128
;
3399 } else if (keys
.enckeylen
== AES_KEYSIZE_192
) {
3400 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_192
;
3401 } else if (keys
.enckeylen
== AES_KEYSIZE_256
) {
3402 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_256
;
3404 pr_err("chcr : Unsupported cipher key\n");
3408 /* Copy only encryption key. We use authkey to generate h(ipad) and
3409 * h(opad) so authkey is not needed again. authkeylen size have the
3410 * size of the hash digest size.
3412 memcpy(aeadctx
->key
, keys
.enckey
, keys
.enckeylen
);
3413 aeadctx
->enckey_len
= keys
.enckeylen
;
3414 if (subtype
== CRYPTO_ALG_SUB_TYPE_CBC_SHA
||
3415 subtype
== CRYPTO_ALG_SUB_TYPE_CBC_NULL
) {
3417 get_aes_decrypt_key(actx
->dec_rrkey
, aeadctx
->key
,
3418 aeadctx
->enckey_len
<< 3);
3420 base_hash
= chcr_alloc_shash(max_authsize
);
3421 if (IS_ERR(base_hash
)) {
3422 pr_err("chcr : Base driver cannot be loaded\n");
3423 aeadctx
->enckey_len
= 0;
3424 memzero_explicit(&keys
, sizeof(keys
));
3428 SHASH_DESC_ON_STACK(shash
, base_hash
);
3430 shash
->tfm
= base_hash
;
3431 shash
->flags
= crypto_shash_get_flags(base_hash
);
3432 bs
= crypto_shash_blocksize(base_hash
);
3433 align
= KEYCTX_ALIGN_PAD(max_authsize
);
3434 o_ptr
= actx
->h_iopad
+ param
.result_size
+ align
;
3436 if (keys
.authkeylen
> bs
) {
3437 err
= crypto_shash_digest(shash
, keys
.authkey
,
3441 pr_err("chcr : Base driver cannot be loaded\n");
3444 keys
.authkeylen
= max_authsize
;
3446 memcpy(o_ptr
, keys
.authkey
, keys
.authkeylen
);
3448 /* Compute the ipad-digest*/
3449 memset(pad
+ keys
.authkeylen
, 0, bs
- keys
.authkeylen
);
3450 memcpy(pad
, o_ptr
, keys
.authkeylen
);
3451 for (i
= 0; i
< bs
>> 2; i
++)
3452 *((unsigned int *)pad
+ i
) ^= IPAD_DATA
;
3454 if (chcr_compute_partial_hash(shash
, pad
, actx
->h_iopad
,
3457 /* Compute the opad-digest */
3458 memset(pad
+ keys
.authkeylen
, 0, bs
- keys
.authkeylen
);
3459 memcpy(pad
, o_ptr
, keys
.authkeylen
);
3460 for (i
= 0; i
< bs
>> 2; i
++)
3461 *((unsigned int *)pad
+ i
) ^= OPAD_DATA
;
3463 if (chcr_compute_partial_hash(shash
, pad
, o_ptr
, max_authsize
))
3466 /* convert the ipad and opad digest to network order */
3467 chcr_change_order(actx
->h_iopad
, param
.result_size
);
3468 chcr_change_order(o_ptr
, param
.result_size
);
3469 key_ctx_len
= sizeof(struct _key_ctx
) +
3470 roundup(keys
.enckeylen
, 16) +
3471 (param
.result_size
+ align
) * 2;
3472 aeadctx
->key_ctx_hdr
= FILL_KEY_CTX_HDR(ck_size
, param
.mk_size
,
3473 0, 1, key_ctx_len
>> 4);
3474 actx
->auth_mode
= param
.auth_mode
;
3475 chcr_free_shash(base_hash
);
3477 memzero_explicit(&keys
, sizeof(keys
));
3481 aeadctx
->enckey_len
= 0;
3482 memzero_explicit(&keys
, sizeof(keys
));
3483 if (!IS_ERR(base_hash
))
3484 chcr_free_shash(base_hash
);
3488 static int chcr_aead_digest_null_setkey(struct crypto_aead
*authenc
,
3489 const u8
*key
, unsigned int keylen
)
3491 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(authenc
));
3492 struct chcr_authenc_ctx
*actx
= AUTHENC_CTX(aeadctx
);
3493 struct crypto_authenc_keys keys
;
3495 /* it contains auth and cipher key both*/
3496 unsigned int subtype
;
3497 int key_ctx_len
= 0;
3498 unsigned char ck_size
= 0;
3500 crypto_aead_clear_flags(aeadctx
->sw_cipher
, CRYPTO_TFM_REQ_MASK
);
3501 crypto_aead_set_flags(aeadctx
->sw_cipher
, crypto_aead_get_flags(authenc
)
3502 & CRYPTO_TFM_REQ_MASK
);
3503 err
= crypto_aead_setkey(aeadctx
->sw_cipher
, key
, keylen
);
3504 crypto_aead_clear_flags(authenc
, CRYPTO_TFM_RES_MASK
);
3505 crypto_aead_set_flags(authenc
, crypto_aead_get_flags(aeadctx
->sw_cipher
)
3506 & CRYPTO_TFM_RES_MASK
);
3510 if (crypto_authenc_extractkeys(&keys
, key
, keylen
) != 0) {
3511 crypto_aead_set_flags(authenc
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
3514 subtype
= get_aead_subtype(authenc
);
3515 if (subtype
== CRYPTO_ALG_SUB_TYPE_CTR_SHA
||
3516 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
) {
3517 if (keys
.enckeylen
< CTR_RFC3686_NONCE_SIZE
)
3519 memcpy(aeadctx
->nonce
, keys
.enckey
+ (keys
.enckeylen
3520 - CTR_RFC3686_NONCE_SIZE
), CTR_RFC3686_NONCE_SIZE
);
3521 keys
.enckeylen
-= CTR_RFC3686_NONCE_SIZE
;
3523 if (keys
.enckeylen
== AES_KEYSIZE_128
) {
3524 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_128
;
3525 } else if (keys
.enckeylen
== AES_KEYSIZE_192
) {
3526 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_192
;
3527 } else if (keys
.enckeylen
== AES_KEYSIZE_256
) {
3528 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_256
;
3530 pr_err("chcr : Unsupported cipher key %d\n", keys
.enckeylen
);
3533 memcpy(aeadctx
->key
, keys
.enckey
, keys
.enckeylen
);
3534 aeadctx
->enckey_len
= keys
.enckeylen
;
3535 if (subtype
== CRYPTO_ALG_SUB_TYPE_CBC_SHA
||
3536 subtype
== CRYPTO_ALG_SUB_TYPE_CBC_NULL
) {
3537 get_aes_decrypt_key(actx
->dec_rrkey
, aeadctx
->key
,
3538 aeadctx
->enckey_len
<< 3);
3540 key_ctx_len
= sizeof(struct _key_ctx
) + roundup(keys
.enckeylen
, 16);
3542 aeadctx
->key_ctx_hdr
= FILL_KEY_CTX_HDR(ck_size
, CHCR_KEYCTX_NO_KEY
, 0,
3543 0, key_ctx_len
>> 4);
3544 actx
->auth_mode
= CHCR_SCMD_AUTH_MODE_NOP
;
3545 memzero_explicit(&keys
, sizeof(keys
));
3548 aeadctx
->enckey_len
= 0;
3549 memzero_explicit(&keys
, sizeof(keys
));
3553 static int chcr_aead_op(struct aead_request
*req
,
3555 create_wr_t create_wr_fn
)
3557 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
3558 struct uld_ctx
*u_ctx
;
3559 struct sk_buff
*skb
;
3562 if (!a_ctx(tfm
)->dev
) {
3563 pr_err("chcr : %s : No crypto device.\n", __func__
);
3566 u_ctx
= ULD_CTX(a_ctx(tfm
));
3567 if (cxgb4_is_crypto_q_full(u_ctx
->lldi
.ports
[0],
3568 a_ctx(tfm
)->tx_qidx
)) {
3570 if (!(req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
3574 /* Form a WR from req */
3575 skb
= create_wr_fn(req
, u_ctx
->lldi
.rxq_ids
[a_ctx(tfm
)->rx_qidx
], size
);
3577 if (IS_ERR(skb
) || !skb
)
3578 return PTR_ERR(skb
);
3580 skb
->dev
= u_ctx
->lldi
.ports
[0];
3581 set_wr_txq(skb
, CPL_PRIORITY_DATA
, a_ctx(tfm
)->tx_qidx
);
3583 return isfull
? -EBUSY
: -EINPROGRESS
;
3586 static int chcr_aead_encrypt(struct aead_request
*req
)
3588 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
3589 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
3591 reqctx
->verify
= VERIFY_HW
;
3592 reqctx
->op
= CHCR_ENCRYPT_OP
;
3594 switch (get_aead_subtype(tfm
)) {
3595 case CRYPTO_ALG_SUB_TYPE_CTR_SHA
:
3596 case CRYPTO_ALG_SUB_TYPE_CBC_SHA
:
3597 case CRYPTO_ALG_SUB_TYPE_CBC_NULL
:
3598 case CRYPTO_ALG_SUB_TYPE_CTR_NULL
:
3599 return chcr_aead_op(req
, 0, create_authenc_wr
);
3600 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM
:
3601 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
:
3602 return chcr_aead_op(req
, 0, create_aead_ccm_wr
);
3604 return chcr_aead_op(req
, 0, create_gcm_wr
);
3608 static int chcr_aead_decrypt(struct aead_request
*req
)
3610 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
3611 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3612 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx(req
);
3615 if (aeadctx
->mayverify
== VERIFY_SW
) {
3616 size
= crypto_aead_maxauthsize(tfm
);
3617 reqctx
->verify
= VERIFY_SW
;
3620 reqctx
->verify
= VERIFY_HW
;
3622 reqctx
->op
= CHCR_DECRYPT_OP
;
3623 switch (get_aead_subtype(tfm
)) {
3624 case CRYPTO_ALG_SUB_TYPE_CBC_SHA
:
3625 case CRYPTO_ALG_SUB_TYPE_CTR_SHA
:
3626 case CRYPTO_ALG_SUB_TYPE_CBC_NULL
:
3627 case CRYPTO_ALG_SUB_TYPE_CTR_NULL
:
3628 return chcr_aead_op(req
, size
, create_authenc_wr
);
3629 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM
:
3630 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
:
3631 return chcr_aead_op(req
, size
, create_aead_ccm_wr
);
3633 return chcr_aead_op(req
, size
, create_gcm_wr
);
3637 static struct chcr_alg_template driver_algs
[] = {
3640 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_SUB_TYPE_CBC
,
3643 .cra_name
= "cbc(aes)",
3644 .cra_driver_name
= "cbc-aes-chcr",
3645 .cra_blocksize
= AES_BLOCK_SIZE
,
3646 .cra_init
= chcr_cra_init
,
3647 .cra_exit
= chcr_cra_exit
,
3648 .cra_u
.ablkcipher
= {
3649 .min_keysize
= AES_MIN_KEY_SIZE
,
3650 .max_keysize
= AES_MAX_KEY_SIZE
,
3651 .ivsize
= AES_BLOCK_SIZE
,
3652 .setkey
= chcr_aes_cbc_setkey
,
3653 .encrypt
= chcr_aes_encrypt
,
3654 .decrypt
= chcr_aes_decrypt
,
3659 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_SUB_TYPE_XTS
,
3662 .cra_name
= "xts(aes)",
3663 .cra_driver_name
= "xts-aes-chcr",
3664 .cra_blocksize
= AES_BLOCK_SIZE
,
3665 .cra_init
= chcr_cra_init
,
3667 .cra_u
.ablkcipher
= {
3668 .min_keysize
= 2 * AES_MIN_KEY_SIZE
,
3669 .max_keysize
= 2 * AES_MAX_KEY_SIZE
,
3670 .ivsize
= AES_BLOCK_SIZE
,
3671 .setkey
= chcr_aes_xts_setkey
,
3672 .encrypt
= chcr_aes_encrypt
,
3673 .decrypt
= chcr_aes_decrypt
,
3678 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
| CRYPTO_ALG_SUB_TYPE_CTR
,
3681 .cra_name
= "ctr(aes)",
3682 .cra_driver_name
= "ctr-aes-chcr",
3684 .cra_init
= chcr_cra_init
,
3685 .cra_exit
= chcr_cra_exit
,
3686 .cra_u
.ablkcipher
= {
3687 .min_keysize
= AES_MIN_KEY_SIZE
,
3688 .max_keysize
= AES_MAX_KEY_SIZE
,
3689 .ivsize
= AES_BLOCK_SIZE
,
3690 .setkey
= chcr_aes_ctr_setkey
,
3691 .encrypt
= chcr_aes_encrypt
,
3692 .decrypt
= chcr_aes_decrypt
,
3697 .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
3698 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686
,
3701 .cra_name
= "rfc3686(ctr(aes))",
3702 .cra_driver_name
= "rfc3686-ctr-aes-chcr",
3704 .cra_init
= chcr_rfc3686_init
,
3705 .cra_exit
= chcr_cra_exit
,
3706 .cra_u
.ablkcipher
= {
3707 .min_keysize
= AES_MIN_KEY_SIZE
+
3708 CTR_RFC3686_NONCE_SIZE
,
3709 .max_keysize
= AES_MAX_KEY_SIZE
+
3710 CTR_RFC3686_NONCE_SIZE
,
3711 .ivsize
= CTR_RFC3686_IV_SIZE
,
3712 .setkey
= chcr_aes_rfc3686_setkey
,
3713 .encrypt
= chcr_aes_encrypt
,
3714 .decrypt
= chcr_aes_decrypt
,
3721 .type
= CRYPTO_ALG_TYPE_AHASH
,
3724 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
3727 .cra_driver_name
= "sha1-chcr",
3728 .cra_blocksize
= SHA1_BLOCK_SIZE
,
3733 .type
= CRYPTO_ALG_TYPE_AHASH
,
3736 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
3738 .cra_name
= "sha256",
3739 .cra_driver_name
= "sha256-chcr",
3740 .cra_blocksize
= SHA256_BLOCK_SIZE
,
3745 .type
= CRYPTO_ALG_TYPE_AHASH
,
3748 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
3750 .cra_name
= "sha224",
3751 .cra_driver_name
= "sha224-chcr",
3752 .cra_blocksize
= SHA224_BLOCK_SIZE
,
3757 .type
= CRYPTO_ALG_TYPE_AHASH
,
3760 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
3762 .cra_name
= "sha384",
3763 .cra_driver_name
= "sha384-chcr",
3764 .cra_blocksize
= SHA384_BLOCK_SIZE
,
3769 .type
= CRYPTO_ALG_TYPE_AHASH
,
3772 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
3774 .cra_name
= "sha512",
3775 .cra_driver_name
= "sha512-chcr",
3776 .cra_blocksize
= SHA512_BLOCK_SIZE
,
3782 .type
= CRYPTO_ALG_TYPE_HMAC
,
3785 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
3787 .cra_name
= "hmac(sha1)",
3788 .cra_driver_name
= "hmac-sha1-chcr",
3789 .cra_blocksize
= SHA1_BLOCK_SIZE
,
3794 .type
= CRYPTO_ALG_TYPE_HMAC
,
3797 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
3799 .cra_name
= "hmac(sha224)",
3800 .cra_driver_name
= "hmac-sha224-chcr",
3801 .cra_blocksize
= SHA224_BLOCK_SIZE
,
3806 .type
= CRYPTO_ALG_TYPE_HMAC
,
3809 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
3811 .cra_name
= "hmac(sha256)",
3812 .cra_driver_name
= "hmac-sha256-chcr",
3813 .cra_blocksize
= SHA256_BLOCK_SIZE
,
3818 .type
= CRYPTO_ALG_TYPE_HMAC
,
3821 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
3823 .cra_name
= "hmac(sha384)",
3824 .cra_driver_name
= "hmac-sha384-chcr",
3825 .cra_blocksize
= SHA384_BLOCK_SIZE
,
3830 .type
= CRYPTO_ALG_TYPE_HMAC
,
3833 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
3835 .cra_name
= "hmac(sha512)",
3836 .cra_driver_name
= "hmac-sha512-chcr",
3837 .cra_blocksize
= SHA512_BLOCK_SIZE
,
3841 /* Add AEAD Algorithms */
3843 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_GCM
,
3847 .cra_name
= "gcm(aes)",
3848 .cra_driver_name
= "gcm-aes-chcr",
3850 .cra_priority
= CHCR_AEAD_PRIORITY
,
3851 .cra_ctxsize
= sizeof(struct chcr_context
) +
3852 sizeof(struct chcr_aead_ctx
) +
3853 sizeof(struct chcr_gcm_ctx
),
3855 .ivsize
= GCM_AES_IV_SIZE
,
3856 .maxauthsize
= GHASH_DIGEST_SIZE
,
3857 .setkey
= chcr_gcm_setkey
,
3858 .setauthsize
= chcr_gcm_setauthsize
,
3862 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
,
3866 .cra_name
= "rfc4106(gcm(aes))",
3867 .cra_driver_name
= "rfc4106-gcm-aes-chcr",
3869 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
3870 .cra_ctxsize
= sizeof(struct chcr_context
) +
3871 sizeof(struct chcr_aead_ctx
) +
3872 sizeof(struct chcr_gcm_ctx
),
3875 .ivsize
= GCM_RFC4106_IV_SIZE
,
3876 .maxauthsize
= GHASH_DIGEST_SIZE
,
3877 .setkey
= chcr_gcm_setkey
,
3878 .setauthsize
= chcr_4106_4309_setauthsize
,
3882 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_CCM
,
3886 .cra_name
= "ccm(aes)",
3887 .cra_driver_name
= "ccm-aes-chcr",
3889 .cra_priority
= CHCR_AEAD_PRIORITY
,
3890 .cra_ctxsize
= sizeof(struct chcr_context
) +
3891 sizeof(struct chcr_aead_ctx
),
3894 .ivsize
= AES_BLOCK_SIZE
,
3895 .maxauthsize
= GHASH_DIGEST_SIZE
,
3896 .setkey
= chcr_aead_ccm_setkey
,
3897 .setauthsize
= chcr_ccm_setauthsize
,
3901 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
,
3905 .cra_name
= "rfc4309(ccm(aes))",
3906 .cra_driver_name
= "rfc4309-ccm-aes-chcr",
3908 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
3909 .cra_ctxsize
= sizeof(struct chcr_context
) +
3910 sizeof(struct chcr_aead_ctx
),
3914 .maxauthsize
= GHASH_DIGEST_SIZE
,
3915 .setkey
= chcr_aead_rfc4309_setkey
,
3916 .setauthsize
= chcr_4106_4309_setauthsize
,
3920 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
3924 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
3926 "authenc-hmac-sha1-cbc-aes-chcr",
3927 .cra_blocksize
= AES_BLOCK_SIZE
,
3928 .cra_priority
= CHCR_AEAD_PRIORITY
,
3929 .cra_ctxsize
= sizeof(struct chcr_context
) +
3930 sizeof(struct chcr_aead_ctx
) +
3931 sizeof(struct chcr_authenc_ctx
),
3934 .ivsize
= AES_BLOCK_SIZE
,
3935 .maxauthsize
= SHA1_DIGEST_SIZE
,
3936 .setkey
= chcr_authenc_setkey
,
3937 .setauthsize
= chcr_authenc_setauthsize
,
3941 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
3946 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
3948 "authenc-hmac-sha256-cbc-aes-chcr",
3949 .cra_blocksize
= AES_BLOCK_SIZE
,
3950 .cra_priority
= CHCR_AEAD_PRIORITY
,
3951 .cra_ctxsize
= sizeof(struct chcr_context
) +
3952 sizeof(struct chcr_aead_ctx
) +
3953 sizeof(struct chcr_authenc_ctx
),
3956 .ivsize
= AES_BLOCK_SIZE
,
3957 .maxauthsize
= SHA256_DIGEST_SIZE
,
3958 .setkey
= chcr_authenc_setkey
,
3959 .setauthsize
= chcr_authenc_setauthsize
,
3963 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
3967 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
3969 "authenc-hmac-sha224-cbc-aes-chcr",
3970 .cra_blocksize
= AES_BLOCK_SIZE
,
3971 .cra_priority
= CHCR_AEAD_PRIORITY
,
3972 .cra_ctxsize
= sizeof(struct chcr_context
) +
3973 sizeof(struct chcr_aead_ctx
) +
3974 sizeof(struct chcr_authenc_ctx
),
3976 .ivsize
= AES_BLOCK_SIZE
,
3977 .maxauthsize
= SHA224_DIGEST_SIZE
,
3978 .setkey
= chcr_authenc_setkey
,
3979 .setauthsize
= chcr_authenc_setauthsize
,
3983 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
3987 .cra_name
= "authenc(hmac(sha384),cbc(aes))",
3989 "authenc-hmac-sha384-cbc-aes-chcr",
3990 .cra_blocksize
= AES_BLOCK_SIZE
,
3991 .cra_priority
= CHCR_AEAD_PRIORITY
,
3992 .cra_ctxsize
= sizeof(struct chcr_context
) +
3993 sizeof(struct chcr_aead_ctx
) +
3994 sizeof(struct chcr_authenc_ctx
),
3997 .ivsize
= AES_BLOCK_SIZE
,
3998 .maxauthsize
= SHA384_DIGEST_SIZE
,
3999 .setkey
= chcr_authenc_setkey
,
4000 .setauthsize
= chcr_authenc_setauthsize
,
4004 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_SHA
,
4008 .cra_name
= "authenc(hmac(sha512),cbc(aes))",
4010 "authenc-hmac-sha512-cbc-aes-chcr",
4011 .cra_blocksize
= AES_BLOCK_SIZE
,
4012 .cra_priority
= CHCR_AEAD_PRIORITY
,
4013 .cra_ctxsize
= sizeof(struct chcr_context
) +
4014 sizeof(struct chcr_aead_ctx
) +
4015 sizeof(struct chcr_authenc_ctx
),
4018 .ivsize
= AES_BLOCK_SIZE
,
4019 .maxauthsize
= SHA512_DIGEST_SIZE
,
4020 .setkey
= chcr_authenc_setkey
,
4021 .setauthsize
= chcr_authenc_setauthsize
,
4025 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CBC_NULL
,
4029 .cra_name
= "authenc(digest_null,cbc(aes))",
4031 "authenc-digest_null-cbc-aes-chcr",
4032 .cra_blocksize
= AES_BLOCK_SIZE
,
4033 .cra_priority
= CHCR_AEAD_PRIORITY
,
4034 .cra_ctxsize
= sizeof(struct chcr_context
) +
4035 sizeof(struct chcr_aead_ctx
) +
4036 sizeof(struct chcr_authenc_ctx
),
4039 .ivsize
= AES_BLOCK_SIZE
,
4041 .setkey
= chcr_aead_digest_null_setkey
,
4042 .setauthsize
= chcr_authenc_null_setauthsize
,
4046 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4050 .cra_name
= "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4052 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4054 .cra_priority
= CHCR_AEAD_PRIORITY
,
4055 .cra_ctxsize
= sizeof(struct chcr_context
) +
4056 sizeof(struct chcr_aead_ctx
) +
4057 sizeof(struct chcr_authenc_ctx
),
4060 .ivsize
= CTR_RFC3686_IV_SIZE
,
4061 .maxauthsize
= SHA1_DIGEST_SIZE
,
4062 .setkey
= chcr_authenc_setkey
,
4063 .setauthsize
= chcr_authenc_setauthsize
,
4067 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4072 .cra_name
= "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4074 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4076 .cra_priority
= CHCR_AEAD_PRIORITY
,
4077 .cra_ctxsize
= sizeof(struct chcr_context
) +
4078 sizeof(struct chcr_aead_ctx
) +
4079 sizeof(struct chcr_authenc_ctx
),
4082 .ivsize
= CTR_RFC3686_IV_SIZE
,
4083 .maxauthsize
= SHA256_DIGEST_SIZE
,
4084 .setkey
= chcr_authenc_setkey
,
4085 .setauthsize
= chcr_authenc_setauthsize
,
4089 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4093 .cra_name
= "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4095 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4097 .cra_priority
= CHCR_AEAD_PRIORITY
,
4098 .cra_ctxsize
= sizeof(struct chcr_context
) +
4099 sizeof(struct chcr_aead_ctx
) +
4100 sizeof(struct chcr_authenc_ctx
),
4102 .ivsize
= CTR_RFC3686_IV_SIZE
,
4103 .maxauthsize
= SHA224_DIGEST_SIZE
,
4104 .setkey
= chcr_authenc_setkey
,
4105 .setauthsize
= chcr_authenc_setauthsize
,
4109 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4113 .cra_name
= "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4115 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4117 .cra_priority
= CHCR_AEAD_PRIORITY
,
4118 .cra_ctxsize
= sizeof(struct chcr_context
) +
4119 sizeof(struct chcr_aead_ctx
) +
4120 sizeof(struct chcr_authenc_ctx
),
4123 .ivsize
= CTR_RFC3686_IV_SIZE
,
4124 .maxauthsize
= SHA384_DIGEST_SIZE
,
4125 .setkey
= chcr_authenc_setkey
,
4126 .setauthsize
= chcr_authenc_setauthsize
,
4130 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_CTR_SHA
,
4134 .cra_name
= "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4136 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4138 .cra_priority
= CHCR_AEAD_PRIORITY
,
4139 .cra_ctxsize
= sizeof(struct chcr_context
) +
4140 sizeof(struct chcr_aead_ctx
) +
4141 sizeof(struct chcr_authenc_ctx
),
4144 .ivsize
= CTR_RFC3686_IV_SIZE
,
4145 .maxauthsize
= SHA512_DIGEST_SIZE
,
4146 .setkey
= chcr_authenc_setkey
,
4147 .setauthsize
= chcr_authenc_setauthsize
,
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
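
/*
 * Illustrative sketch (not part of the driver): once the templates above
 * are registered, a kernel consumer reaches them through the generic AEAD
 * API by cra_name, and the crypto core selects this implementation when
 * its cra_priority wins.  key_blob/key_blob_len below are placeholders;
 * the blob handed to crypto_aead_setkey() must follow the authenc() key
 * layout (RTA-encoded enckeylen, then the auth key, then the enc key).
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *	crypto_aead_setkey(tfm, key_blob, key_blob_len);
 *	...
 *	crypto_free_aead(tfm);
 */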
/*
 *	chcr_unregister_alg - Deregister crypto algorithms with the
 *	kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
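
/*
 * Context sizes handed to the ahash framework below: SZ_AHASH_CTX is the
 * per-tfm context for plain digests, SZ_AHASH_H_CTX adds the HMAC state
 * for keyed digests, and SZ_AHASH_REQ_CTX sizes the per-request context,
 * which is also advertised as halg.statesize for export/import.
 */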
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
/*
 *	chcr_register_alg - Register crypto algorithms with the kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* Skip anything already registered by an earlier call. */
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		}
		driver_algs[i].is_registered = 1;
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After this
 *	the kernel will start calling the driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}
/*
 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
 *	This should be called once when the last device goes down. After this
 *	the kernel will not call the driver APIs for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
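
/*
 * Usage sketch (assumption, mirroring how the chcr core is expected to
 * drive these entry points): algorithms are registered when the first
 * adapter shows up and torn down when the last one goes away.  dev_count
 * below is a placeholder for whatever device accounting chcr_core.c keeps.
 *
 *	if (atomic_inc_return(&dev_count) == 1)
 *		err = start_crypto();
 *	...
 *	if (atomic_dec_and_test(&dev_count))
 *		stop_crypto();
 */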