/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
static unsigned int sgl_ent_len[] = {
	0, 0, 16, 24, 40, 48, 64, 72, 88,
	96, 112, 120, 136, 144, 160, 168, 184,
	192, 208, 216, 232, 240, 256, 264, 280,
	288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
	0, 32, 32, 48, 48, 64, 64, 80, 80,
	112, 112, 128, 128, 144, 144, 160, 160,
	192, 192, 208, 208, 224, 224, 240, 240,
	272, 272, 288, 288, 304, 304, 320, 320
};

static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};
static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err);
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
	return &ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
	return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
	return container_of(ctx->dev, struct uld_ctx, dev);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
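
/*
 * sg_nents_xlen() - count how many hardware SG entries are needed to map
 * @reqlen bytes of @sg when each entry can describe at most @entlen bytes,
 * after skipping the first @skip bytes of the scatterlist.
 */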
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
			 unsigned int entlen,
			 unsigned int skip)
{
	int nents = 0;
	unsigned int less;
	unsigned int skip_len = 0;

	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (sg && reqlen) {
		less = min(reqlen, sg_dma_len(sg) - skip_len);
		nents += DIV_ROUND_UP(less, entlen);
		reqlen -= less;
		skip_len = 0;
		sg = sg_next(sg);
	}
	return nents;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
{
	struct aead_alg *alg = crypto_aead_alg(aead);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.aead);
	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
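
/*
 * chcr_verify_tag() - software tag check for AEAD decryption: compare the
 * tag material carried after the CPL_FW6_PLD completion with the tag found
 * in the request (fw6_pld->data[2] for GCM/RFC4106, otherwise the last
 * authsize bytes of req->src) and report -EBADMSG on mismatch.
 */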
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
	u8 temp[SHA512_DIGEST_SIZE];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	struct cpl_fw6_pld *fw6_pld;
	int cmp = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
				   authsize, req->assoclen +
				   req->cryptlen - authsize);
		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
	}
	if (cmp)
		*err = -EBADMSG;
	else
		*err = 0;
}
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
	if (dev->state == CHCR_DETACH)
		return 1;
	atomic_inc(&dev->inflight);
	return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
	atomic_dec(&dev->inflight);
}
static inline int chcr_handle_aead_resp(struct aead_request *req,
					unsigned char *input,
					int err)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_dev *dev = a_ctx(tfm)->dev;

	chcr_aead_common_exit(req);
	if (reqctx->verify == VERIFY_SW) {
		chcr_verify_tag(req, input, &err);
		reqctx->verify = VERIFY_HW;
	}
	chcr_dec_wrcount(dev);
	aead_request_complete(req, err);

	return err;
}
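
/*
 * get_aes_decrypt_key() - expand the AES key schedule and write out the
 * final Nk round-key words in reverse order. The result is stored as
 * ablkctx->rrkey and copied into the key context for decrypt operations
 * (see generate_copy_rrkey()).
 */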
static void get_aes_decrypt_key(unsigned char *dec_key,
				const unsigned char *key,
				unsigned int keylength)
{
	u32 temp;
	u32 w_ring[MAX_NK];
	int i, j, k;
	u8 nr, nk;

	switch (keylength) {
	case AES_KEYLENGTH_128BIT:
		nk = KEYLENGTH_4BYTES;
		nr = NUMBER_OF_ROUNDS_10;
		break;
	case AES_KEYLENGTH_192BIT:
		nk = KEYLENGTH_6BYTES;
		nr = NUMBER_OF_ROUNDS_12;
		break;
	case AES_KEYLENGTH_256BIT:
		nk = KEYLENGTH_8BYTES;
		nr = NUMBER_OF_ROUNDS_14;
		break;
	default:
		return;
	}
	for (i = 0; i < nk; i++)
		w_ring[i] = get_unaligned_be32(&key[i * 4]);

	i = 0;
	temp = w_ring[nk - 1];
	while (i + nk < (nr + 1) * 4) {
		if (!(i % nk)) {
			/* RotWord, SubWord and XOR with the round constant */
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i / nk];
		} else if (nk == 8 && (i % 4 == 0)) {
			temp = aes_ks_subword(temp);
		}
		w_ring[i % nk] ^= temp;
		temp = w_ring[i % nk];
		i++;
	}
	i--;
	for (k = 0, j = i % nk; k < nk; k++) {
		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
		j--;
		if (j < 0)
			j += nk;
	}
}
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

	switch (ds) {
	case SHA1_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha1", 0, 0);
		break;
	case SHA224_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha224", 0, 0);
		break;
	case SHA256_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha256", 0, 0);
		break;
	case SHA384_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha384", 0, 0);
		break;
	case SHA512_DIGEST_SIZE:
		base_hash = crypto_alloc_shash("sha512", 0, 0);
		break;
	}

	return base_hash;
}
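
/*
 * chcr_compute_partial_hash() - hash exactly one block (an HMAC ipad or
 * opad) with the software shash and export the raw internal state words
 * into result_hash. This partial state, not a finalised digest, is what is
 * later loaded into the hardware key context (see create_hash_wr()).
 */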
static int chcr_compute_partial_hash(struct shash_desc *desc,
				     char *iopad, char *result_hash,
				     int digest_size)
{
	struct sha1_state sha1_st;
	struct sha256_state sha256_st;
	struct sha512_state sha512_st;
	int error;

	if (digest_size == SHA1_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha1_st);
		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
	} else if (digest_size == SHA224_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA256_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha256_st);
		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

	} else if (digest_size == SHA384_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else if (digest_size == SHA512_DIGEST_SIZE) {
		error = crypto_shash_init(desc) ?:
			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
			crypto_shash_export(desc, (void *)&sha512_st);
		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

	} else {
		error = -EINVAL;
		pr_err("Unknown digest size %d\n", digest_size);
	}
	return error;
}
static void chcr_change_order(char *buf, int ds)
{
	int i;

	if (ds == SHA512_DIGEST_SIZE) {
		for (i = 0; i < (ds / sizeof(u64)); i++)
			*((__be64 *)buf + i) =
				cpu_to_be64(*((u64 *)buf + i));
	} else {
		for (i = 0; i < (ds / sizeof(u32)); i++)
			*((__be32 *)buf + i) =
				cpu_to_be32(*((u32 *)buf + i));
	}
}
static inline int is_hmac(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
			     alg.hash);
	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
		return 1;
	return 0;
}
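
/*
 * dsgl_walk_*: helpers for building the destination physical scatter/gather
 * list (CPL_RX_PHYS_DSGL) that tells the adapter where to DMA results back.
 * Entries cover at most CHCR_DST_SG_SIZE bytes each and are packed eight
 * address/length pairs per phys_sge_pairs block.
 */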
static inline void dsgl_walk_init(struct dsgl_walk *walk,
				  struct cpl_rx_phys_dsgl *dsgl)
{
	walk->dsgl = dsgl;
	walk->nents = 0;
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

	phys_cpl = walk->dsgl;

	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
	phys_cpl->pcirlxorder_to_noofsgentr =
		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
				      size_t size,
				      dma_addr_t addr)
{
	int j;

	if (!size)
		return;
	j = walk->nents;
	walk->to->len[j % 8] = htons(size);
	walk->to->addr[j % 8] = cpu_to_be64(addr);
	j++;
	if ((j % 8) == 0)
		walk->to++;
	walk->nents = j;
}
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
			     struct scatterlist *sg,
			     unsigned int slen,
			     unsigned int skip)
{
	int skip_len = 0;
	unsigned int left_size = slen, len = 0;
	unsigned int j = walk->nents;
	int offset, ent_len;

	if (!slen)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}

	while (left_size && sg) {
		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		offset = 0;
		while (len) {
			ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
			walk->to->len[j % 8] = htons(ent_len);
			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
							    offset + skip_len);
			offset += ent_len;
			len -= ent_len;
			j++;
			if ((j % 8) == 0)
				walk->to++;
		}
		walk->last_sg = sg;
		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
					  skip_len) + skip_len;
		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
		skip_len = 0;
		sg = sg_next(sg);
	}
	walk->nents = j;
}
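
/*
 * ulptx_walk_*: helpers for building the ULPTX source SGL describing the
 * data the adapter must fetch. The first entry uses len0/addr0 of struct
 * ulptx_sgl, further entries are stored as address/length pairs, each
 * covering at most CHCR_SRC_SG_SIZE bytes.
 */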
static inline void ulptx_walk_init(struct ulptx_walk *walk,
				   struct ulptx_sgl *ulp)
{
	walk->sgl = ulp;
	walk->nents = 0;
	walk->pair_idx = 0;
	walk->pair = ulp->sge;
	walk->last_sg = NULL;
	walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
				       size_t size,
				       dma_addr_t addr)
{
	if (!size)
		return;

	if (walk->nents == 0) {
		walk->sgl->len0 = cpu_to_be32(size);
		walk->sgl->addr0 = cpu_to_be64(addr);
	} else {
		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
	}
	walk->nents++;
}
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
			      struct scatterlist *sg,
			      unsigned int len,
			      unsigned int skip)
{
	int small;
	int skip_len = 0;
	unsigned int sgmin;

	if (!len)
		return;
	while (sg && skip) {
		if (sg_dma_len(sg) <= skip) {
			skip -= sg_dma_len(sg);
			skip_len = 0;
			sg = sg_next(sg);
		} else {
			skip_len = skip;
			skip = 0;
		}
	}
	WARN(!sg, "SG should not be null here\n");
	if (sg && (walk->nents == 0)) {
		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->sgl->len0 = cpu_to_be32(sgmin);
		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->nents++;
		len -= sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = sgmin + skip_len;
		skip_len += sgmin;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}

	while (sg && len) {
		small = min(sg_dma_len(sg) - skip_len, len);
		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
		walk->pair->addr[walk->pair_idx] =
			cpu_to_be64(sg_dma_address(sg) + skip_len);
		walk->pair_idx = !walk->pair_idx;
		if (!walk->pair_idx)
			walk->pair++;
		walk->nents++;
		len -= sgmin;
		skip_len += sgmin;
		walk->last_sg = sg;
		walk->last_sg_len = skip_len;
		if (sg_dma_len(sg) == skip_len) {
			sg = sg_next(sg);
			skip_len = 0;
		}
	}
}
static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_alg_template *chcr_crypto_alg =
		container_of(alg, struct chcr_alg_template, alg.skcipher);

	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
	struct adapter *adap = netdev2adap(dev);
	struct sge_uld_txq_info *txq_info =
		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
	struct sge_uld_txq *txq;
	int ret = 0;

	local_bh_disable();
	txq = &txq_info->uldtxq[idx];
	spin_lock(&txq->sendq.lock);
	if (txq->full)
		ret = -1;
	spin_unlock(&txq->sendq.lock);
	local_bh_enable();
	return ret;
}
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
			       struct _key_ctx *key_ctx)
{
	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
	} else {
		memcpy(key_ctx->key,
		       ablkctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->enckey_len >> 1);
		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
	}
	return 0;
}
static int chcr_hash_ent_in_wr(struct scatterlist *src,
			       unsigned int minsg,
			       unsigned int space,
			       unsigned int srcskip)
{
	int srclen = 0;
	int srcsg = minsg;
	int soffset = 0, sless;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	while (src && space > (sgl_ent_len[srcsg + 1])) {
		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_dma_len(src) == (soffset + srcskip)) {
			src = sg_next(src);
			soffset = 0;
			srcskip = 0;
		}
	}
	return srclen;
}
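
/*
 * chcr_sg_ent_in_wr() - given the space left in a work request, compute how
 * many payload bytes can be covered while keeping both the source SGL
 * (sgl_ent_len[]) and the destination DSGL (dsgl_ent_len[]) within @space
 * and within MAX_DSGL_ENT entries. Returns min(src bytes, dst bytes).
 */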
static int chcr_sg_ent_in_wr(struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int minsg,
			     unsigned int space,
			     unsigned int srcskip,
			     unsigned int dstskip)
{
	int srclen = 0, dstlen = 0;
	int srcsg = minsg, dstsg = minsg;
	int offset = 0, soffset = 0, less, sless = 0;

	if (sg_dma_len(src) == srcskip) {
		src = sg_next(src);
		srcskip = 0;
	}
	if (sg_dma_len(dst) == dstskip) {
		dst = sg_next(dst);
		dstskip = 0;
	}

	while (src && dst &&
	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
			      CHCR_SRC_SG_SIZE);
		srclen += sless;
		srcsg++;
		offset = 0;
		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
			if (srclen <= dstlen)
				break;
			less = min_t(unsigned int, sg_dma_len(dst) - offset -
				     dstskip, CHCR_DST_SG_SIZE);
			dstlen += less;
			offset += less;
			if ((offset + dstskip) == sg_dma_len(dst)) {
				dst = sg_next(dst);
				offset = 0;
			}
			dstsg++;
			dstskip = 0;
		}
		soffset += sless;
		if ((soffset + srcskip) == sg_dma_len(src)) {
			src = sg_next(src);
			srcskip = 0;
			soffset = 0;
		}
	}
	return min(srclen, dstlen);
}
static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
				struct skcipher_request *req,
				u8 *iv,
				unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int err;

	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
				   req->cryptlen, iv);

	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
			crypto_skcipher_encrypt(&reqctx->fallback_req);

	return err;
}
static inline int get_qidxs(struct crypto_async_request *req,
			    unsigned int *txqidx, unsigned int *rxqidx)
{
	struct crypto_tfm *tfm = req->tfm;
	int ret = 0;

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
	{
		struct aead_request *aead_req =
			container_of(req, struct aead_request, base);
		struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_SKCIPHER:
	{
		struct skcipher_request *sk_req =
			container_of(req, struct skcipher_request, base);
		struct chcr_skcipher_req_ctx *reqctx =
			skcipher_request_ctx(sk_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	case CRYPTO_ALG_TYPE_AHASH:
	{
		struct ahash_request *ahash_req =
			container_of(req, struct ahash_request, base);
		struct chcr_ahash_req_ctx *reqctx =
			ahash_request_ctx(ahash_req);
		*txqidx = reqctx->txqidx;
		*rxqidx = reqctx->rxqidx;
		break;
	}
	default:
		ret = -EINVAL;
		/* should never get here */
		break;
	}
	return ret;
}
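
/*
 * create_wreq() - fill the common FW_CRYPTO_LOOKASIDE_WR header, ULPTX and
 * SC_IMM fields of a work request: length in 16-byte units, completion
 * cookie, and the rx/tx channel and queue the response should be steered to.
 */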
static inline void create_wreq(struct chcr_context *ctx,
			       struct chcr_wr *chcr_req,
			       struct crypto_async_request *req,
			       unsigned int imm,
			       int hash_sz,
			       unsigned int len16,
			       unsigned int sc_len,
			       unsigned int lcb)
{
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	unsigned int tx_channel_id, rx_channel_id;
	unsigned int txqidx = 0, rxqidx = 0;
	unsigned int qid, fid, portno;

	get_qidxs(req, &txqidx, &rxqidx);
	qid = u_ctx->lldi.rxq_ids[rxqidx];
	fid = u_ctx->lldi.rxq_ids[0];
	portno = rxqidx / ctx->rxq_perchan;
	tx_channel_id = txqidx / ctx->txq_perchan;
	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);

	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
	chcr_req->wreq.pld_size_hash_size =
		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
	chcr_req->wreq.len16_pkd =
		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
							    !!lcb, txqidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));
	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					   sizeof(chcr_req->key_ctx) + sc_len);
}
/**
 *	create_cipher_wr - form the WR for cipher operations
 *	@wrparam: Container for create_cipher_wr()'s parameters
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	struct chcr_skcipher_req_ctx *reqctx =
		skcipher_request_ctx(wrparam->req);
	unsigned int temp = 0, transhdr_len, dst_size;
	int error;
	int nents;
	unsigned int kctx_len;
	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
			GFP_KERNEL : GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);
	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
			      reqctx->dst_ofst);
	dst_size = get_space_for_phys_dsgl(nents);
	kctx_len = roundup(ablkctx->enckey_len, 16);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
			      CHCR_SRC_SG_SIZE, reqctx->src_ofst);
	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
			     (sgl_len(nents) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}
	chcr_req = __skb_put_zero(skb, transhdr_len);
	chcr_req->sec_cpl.op_ivinsrtofst =
			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);

	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
	chcr_req->sec_cpl.aadstart_cipherstop_hi =
			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
							 ablkctx->ciph_mode,
							 0, 0, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
								   0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
	if ((reqctx->op == CHCR_DECRYPT_OP) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
	    (!(get_cryptoalg_subtype(tfm) ==
	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
	} else {
		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
			memcpy(chcr_req->key_ctx.key, ablkctx->key,
			       ablkctx->enckey_len);
		} else {
			memcpy(chcr_req->key_ctx.key, ablkctx->key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->enckey_len >> 1);
			memcpy(chcr_req->key_ctx.key +
			       (ablkctx->enckey_len >> 1),
			       ablkctx->key,
			       ablkctx->enckey_len >> 1);
		}
	}
	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

	atomic_inc(&adap->chcr_stats.cipher_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
		+ (reqctx->imm ? (wrparam->bytes) : 0);
	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
		    transhdr_len, temp,
		    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
	reqctx->skb = skb;

	if (reqctx->op && (ablkctx->ciph_mode ==
			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
		sg_pcopy_to_buffer(wrparam->req->src,
			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

	return skb;
err:
	return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
	int ck_size = 0;

	if (keylen == AES_KEYSIZE_128)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	else if (keylen == AES_KEYSIZE_192)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	else if (keylen == AES_KEYSIZE_256)
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	else
		ck_size = 0;

	return ck_size;
}
static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
				       const u8 *key,
				       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));

	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ablkctx->sw_cipher,
				  cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
}
static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
			       const u8 *key,
			       unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;
	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
				   const u8 *key,
				   unsigned int keylen)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned int ck_size, context_size;
	u16 alignment = 0;
	int err;

	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;
	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
	if (err)
		goto badkey_err;

	ck_size = chcr_keyctx_ck_size(keylen);
	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
	memcpy(ablkctx->key, key, keylen);
	ablkctx->enckey_len = keylen;
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
			keylen + alignment) >> 4;

	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
						0, 0, context_size);
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
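
/*
 * CTR IV arithmetic: ctr_add_iv() adds a block count to the 128-bit
 * big-endian counter with carry propagation across 32-bit words, and
 * adjust_ctr_overflow() trims a request so the low 32-bit counter word
 * does not wrap within a single work request.
 */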
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
	unsigned int size = AES_BLOCK_SIZE;
	__be32 *b = (__be32 *)(dstiv + size);
	u32 c, prev;

	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
	for (; size >= 4; size -= 4) {
		prev = be32_to_cpu(*--b);
		c = prev + add;
		*b = cpu_to_be32(c);
		if (prev < c)
			break;
		add = 1;
	}
}
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
	u64 c;
	u32 temp = be32_to_cpu(*--b);

	temp = ~temp;
	c = (u64)temp + 1; // No of block can processed without overflow
	if ((bytes / AES_BLOCK_SIZE) >= c)
		bytes = c * AES_BLOCK_SIZE;
	return bytes;
}
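
/*
 * chcr_update_tweak() - recompute the XTS tweak after a partial transfer:
 * encrypt the saved IV with the second half of the XTS key, then apply one
 * GF(2^128) doubling per AES block already processed (gf128mul_x8_ble()
 * performs eight doublings at once). When not final, the result is run back
 * through aes_decrypt() before being reused as the request IV.
 */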
static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
			     u32 isfinal)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_aes_ctx aes;
	int ret, i;
	u8 *key;
	unsigned int keylen;
	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
	int round8 = round / 8;

	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

	keylen = ablkctx->enckey_len / 2;
	key = ablkctx->key + keylen;
	/* For a 192 bit key remove the padded zeroes which was
	 * added in chcr_xts_setkey
	 */
	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
		ret = aes_expandkey(&aes, key, keylen - 8);
	else
		ret = aes_expandkey(&aes, key, keylen);
	if (ret)
		return ret;
	aes_encrypt(&aes, iv, iv);
	for (i = 0; i < round8; i++)
		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

	for (i = 0; i < (round % 8); i++)
		gf128mul_x_ble((le128 *)iv, (le128 *)iv);

	if (!isfinal)
		aes_decrypt(&aes, iv, iv);

	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
static int chcr_update_cipher_iv(struct skcipher_request *req,
				 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, (reqctx->processed /
			   AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
						AES_BLOCK_SIZE) + 1);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
		ret = chcr_update_tweak(req, iv, 0);
	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		if (reqctx->op)
			/*Updated before sending last WR*/
			memcpy(iv, req->iv, AES_BLOCK_SIZE);
		else
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}

	return ret;
}
/* We need separate function for final iv because in rfc3686  Initial counter
 * starts from 1 and buffer size of iv is 8 byte only which remains constant
 * for subsequent update requests
 */
static int chcr_final_cipher_iv(struct skcipher_request *req,
				struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	int subtype = get_cryptoalg_subtype(tfm);
	int ret = 0;

	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
						     AES_BLOCK_SIZE));
	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
		if (!reqctx->partial_req)
			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
		else
			ret = chcr_update_tweak(req, iv, 1);
	} else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
		/*Already updated for Decrypt*/
		if (!reqctx->op)
			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
	}
	return ret;
}
static int chcr_handle_cipher_resp(struct skcipher_request *req,
				   unsigned char *input, int err)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct chcr_context *ctx = c_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);
	struct cipher_wr_param wrparam;
	struct sk_buff *skb;
	int bytes;

	if (err)
		goto unmap;
	if (req->cryptlen == reqctx->processed) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
		goto complete;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  reqctx->src_ofst, reqctx->dst_ofst);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		/*CTR mode counter overflow*/
		bytes = req->cryptlen - reqctx->processed;
	}
	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
	if (err)
		goto unmap;

	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		memcpy(req->iv, reqctx->init_iv, IV);
		atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
					   reqctx->op);
		goto complete;
	}

	if (get_cryptoalg_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_CTR)
		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
	wrparam.req = req;
	wrparam.bytes = bytes;
	skb = create_cipher_wr(&wrparam);
	if (IS_ERR(skb)) {
		pr_err("%s : Failed to form WR. No memory\n", __func__);
		err = PTR_ERR(skb);
		goto unmap;
	}
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	reqctx->last_req_len = bytes;
	reqctx->processed += bytes;
	if (get_cryptoalg_subtype(tfm) ==
		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
			CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
	if (get_cryptoalg_subtype(tfm) ==
		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
			CRYPTO_TFM_REQ_MAY_SLEEP) {
		complete(&ctx->cbc_aes_aio_done);
	}
	chcr_dec_wrcount(dev);
	skcipher_request_complete(req, err);
	return err;
}
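
/*
 * process_cipher() - validate a cipher request, set up the per-request IV
 * and DMA mappings, decide between immediate data and SGL mode, split the
 * request to fit the work-request size limits, and build the first WR.
 * Zero-length requests, XTS requests that are not block aligned, and chunks
 * where nothing can be scheduled fall back to the software skcipher.
 */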
static int process_cipher(struct skcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct adapter *adap = padap(c_ctx(tfm)->dev);
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;
	int subtype;

	reqctx->processed = 0;
	reqctx->partial_req = 0;
	if (!req->iv)
		goto error;
	subtype = get_cryptoalg_subtype(tfm);
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->cryptlen == 0) ||
	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
			goto fallback;
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->cryptlen, ivsize);
		goto error;
	}

	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					       AES_MIN_KEY_SIZE +
					       sizeof(struct cpl_rx_phys_dsgl) +
					       /*Min dsgl size*/
					       32))) {
		/* Can be sent as Imm*/
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->cryptlen,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->cryptlen;

	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->cryptlen)
			bytes = req->cryptlen - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->cryptlen;
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->iv, bytes);
	}
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
		memcpy(reqctx->init_iv, reqctx->iv, IV);

	} else {

		memcpy(reqctx->iv, req->iv, IV);
		memcpy(reqctx->init_iv, req->iv, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
fallback:       atomic_inc(&adap->chcr_stats.fallback);
		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
					   subtype ==
					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
					   reqctx->iv : req->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;
	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}
static int chcr_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
						reqctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
			err = -ENOSPC;
			goto error;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	if (get_cryptoalg_subtype(tfm) ==
		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
			CRYPTO_TFM_REQ_MAY_SLEEP) {
			reqctx->partial_req = 1;
			wait_for_completion(&ctx->cbc_aes_aio_done);
	}
	return -EINPROGRESS;
error:
	chcr_dec_wrcount(dev);
	return err;
}
static int chcr_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct chcr_dev *dev = c_ctx(tfm)->dev;
	struct sk_buff *skb = NULL;
	int err;
	struct chcr_context *ctx = c_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	err = chcr_inc_wrcount(dev);
	if (err)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
						reqctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
			return -ENOSPC;
	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	int txq_perchan, ntxq;
	int err = 0, rxq_perchan;

	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			err = -ENXIO;
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = &u_ctx->dev;
		ntxq = u_ctx->lldi.ntxq;
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		ctx->ntxq = ntxq;
		ctx->nrxq = u_ctx->lldi.nrxq;
		ctx->rxq_perchan = rxq_perchan;
		ctx->txq_perchan = txq_perchan;
	}
out:
	return err;
}
static int chcr_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	init_completion(&ctx->cbc_aes_aio_done);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
					 crypto_skcipher_reqsize(ablkctx->sw_cipher));

	return chcr_device_init(ctx);
}
static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
	 * cannot be used as fallback in chcr_handle_cipher_response
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
	return chcr_device_init(ctx);
}
static void chcr_exit_tfm(struct crypto_skcipher *tfm)
{
	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
}
static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}
static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}
/**
 *	create_hash_wr - Create hash work request
 *	@req: Cipher req base
 *	@param: Container for create_hash_wr()'s parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;
	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;

	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);

	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					    param->alg_prm.mk_size, 0,
					    param->opad_needed,
					    ((param->kctx_len +
					     sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request upto max wr size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;
	/* Detach state for CHCR means lldi or padap is freed. Increasing
	 * inflight count for dev guarantees that lldi and padap is valid
	 */
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
						req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
			error = -ENOSPC;
			goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
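
/*
 * create_last_hash_block() - build the final padding block used when the
 * trailing update has no data: 0x80 followed by zeroes, with the total
 * message length in bits placed in the last 8 bytes of the 64- or 128-byte
 * block.
 */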
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	chcr_init_hctx_per_wr(req_ctx);
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;

	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
						req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
			error = -ENOSPC;
			goto err;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}

	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
				- req_ctx->reqlen;
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
			       params.sg_len;
	}
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	if ((req_ctx->reqlen + req->nbytes) == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	}
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->reqlen = 0;
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_hmac_init(struct ahash_request *areq);
static int chcr_sha_init(struct ahash_request *areq);
static int chcr_ahash_digest(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_dev *dev = h_ctx(rtfm)->dev;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
	struct chcr_context *ctx = h_ctx(rtfm);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	req_ctx->txqidx = cpu % ctx->ntxq;
	req_ctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	if (is_hmac(crypto_ahash_tfm(rtfm)))
		chcr_hmac_init(req);
	else
		chcr_sha_init(req);

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	error = chcr_inc_wrcount(dev);
	if (error)
		return -ENXIO;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
						req_ctx->txqidx) &&
		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
			error = -ENOSPC;
			goto err;
	}

	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
					    HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len < req->nbytes) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
	} else {
		params.sg_len = req->nbytes;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.last = 1;
		params.more = 0;
		params.scmd1 = req->nbytes + req_ctx->data_len;
	}
	params.bfr_len = 0;
	req_ctx->hctx_wr.result = 1;
	req_ctx->hctx_wr.srcsg = req->src;
	req_ctx->data_len += params.bfr_len + params.sg_len;

	if (req->nbytes == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.more = 1;
		params.bfr_len = bs;
	}

	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}
	req_ctx->hctx_wr.processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
	chcr_dec_wrcount(dev);
	return error;
}
static int chcr_ahash_continue(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct chcr_context *ctx = h_ctx(rtfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct hash_wr_param params;
	u8 bs;
	int error;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.kctx_len *= 2;
		params.opad_needed = 1;
	} else {
		params.opad_needed = 0;
	}
	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
					    HASH_SPACE_LEFT(params.kctx_len),
					    hctx_wr->src_ofst);
	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
		params.sg_len = req->nbytes - hctx_wr->processed;
	if (!hctx_wr->result ||
	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
		if (is_hmac(crypto_ahash_tfm(rtfm))) {
			params.kctx_len /= 2;
			params.opad_needed = 0;
		}
		params.last = 0;
		params.more = 1;
		params.sg_len = rounddown(params.sg_len, bs);
		params.hash_size = params.alg_prm.result_size;
		params.scmd1 = 0;
	} else {
		params.last = 1;
		params.more = 0;
		params.hash_size = crypto_ahash_digestsize(rtfm);
		params.scmd1 = reqctx->data_len + params.sg_len;
	}
	params.bfr_len = 0;
	reqctx->data_len += params.sg_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto err;
	}
	hctx_wr->processed += params.sg_len;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return 0;
err:
	return error;
}
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
					  unsigned char *input,
					  int err)
{
	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
	int digestsize, updated_digestsize;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_dev *dev = h_ctx(tfm)->dev;

	if (input == NULL)
		goto out;
	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;

	if (hctx_wr->dma_addr) {
		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
				 hctx_wr->dma_len, DMA_TO_DEVICE);
		hctx_wr->dma_addr = 0;
	}
	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
				 req->nbytes)) {
		if (hctx_wr->result == 1) {
			hctx_wr->result = 0;
			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
			       digestsize);
		} else {
			memcpy(reqctx->partial_hash,
			       input + sizeof(struct cpl_fw6_pld),
			       updated_digestsize);
		}
		goto unmap;
	}
	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
	       updated_digestsize);

	err = chcr_ahash_continue(req);
	if (err)
		goto unmap;
	return;
unmap:
	if (hctx_wr->is_sg_map)
		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);

out:
	chcr_dec_wrcount(dev);
	ahash_request_complete(req, err);
}
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;

	case CRYPTO_ALG_TYPE_SKCIPHER:
		chcr_handle_cipher_resp(skcipher_request_cast(req),
					input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(state);
	return 0;
}
2185 static int chcr_ahash_import(struct ahash_request
*areq
, const void *in
)
2187 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
2188 struct chcr_ahash_req_ctx
*state
= (struct chcr_ahash_req_ctx
*)in
;
2190 req_ctx
->reqlen
= state
->reqlen
;
2191 req_ctx
->data_len
= state
->data_len
;
2192 req_ctx
->reqbfr
= req_ctx
->bfr1
;
2193 req_ctx
->skbfr
= req_ctx
->bfr2
;
2194 memcpy(req_ctx
->bfr1
, state
->bfr1
, CHCR_HASH_MAX_BLOCK_SIZE_128
);
2195 memcpy(req_ctx
->partial_hash
, state
->partial_hash
,
2196 CHCR_HASH_MAX_DIGEST_SIZE
);
2197 chcr_init_hctx_per_wr(req_ctx
);
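/*
 * Usage note for the export/import pair above: they serialize the partial
 * hash state (buffered block, running length, partial digest) into the
 * opaque blob the ahash API moves around.  A caller-side sketch using only
 * the generic crypto API (STATE_SIZE is a placeholder for
 * crypto_ahash_statesize(tfm), not a driver symbol):
 *
 *	u8 state[STATE_SIZE];
 *
 *	crypto_ahash_export(req, state);	// lands in chcr_ahash_export()
 *	...
 *	crypto_ahash_import(req, state);	// lands in chcr_ahash_import()
 */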
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* Use the key to calculate the ipad and opad. The ipad will be sent
	 * with the first request's data, the opad with the final hash result.
	 * They are stored in hmacctx->ipad and hmacctx->opad respectively.
	 */
	shash->tfm = hmacctx->base_hash;
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
		      "fortified memcpy causes -Wrestrict warning");

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
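/*
 * Illustrative sketch of the HMAC precomputation performed above (the
 * standard RFC 2104 construction, not extra driver logic): the key, padded
 * to one block, is XORed with the ipad/opad bytes, and one compression step
 * over each padded block is saved as the partial state the hardware resumes
 * from.  Byte-wise this is roughly:
 *
 *	for (i = 0; i < bs; i++) {
 *		ipad[i] = key_block[i] ^ 0x36;
 *		opad[i] = key_block[i] ^ 0x5c;
 *	}
 *
 * The word-wide IPAD_DATA/OPAD_DATA XORs above apply four of these pad
 * bytes per store (assuming those constants are 0x36/0x5c repeated).
 */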
static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
			       unsigned int key_len)
{
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
	unsigned short context_size = 0;
	int err;

	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
	if (err)
		goto badkey_err;

	memcpy(ablkctx->key, key, key_len);
	ablkctx->enckey_len = key_len;
	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
	/* Both keys for xts must be aligned to 16 byte boundary
	 * by padding with zeros. So for 24 byte keys padding 8 zeroes.
	 */
	if (key_len == 48) {
		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
				+ 16) >> 4;
		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
		memset(ablkctx->key + 24, 0, 8);
		memset(ablkctx->key + 56, 0, 8);
		ablkctx->enckey_len = 64;
		ablkctx->key_ctx_hdr =
			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
					 CHCR_KEYCTX_NO_KEY, 1,
					 0, context_size);
	} else {
		ablkctx->key_ctx_hdr =
			FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
					 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
					 CHCR_KEYCTX_NO_KEY, 1,
					 0, context_size);
	}
	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
	return 0;
badkey_err:
	ablkctx->enckey_len = 0;

	return err;
}
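/*
 * Key-context layout note for the XTS setkey above, stated here for
 * clarity: an XTS key is two AES keys back to back, and the hardware wants
 * each half aligned to 16 bytes.  AES-128-XTS (32 bytes) and AES-256-XTS
 * (64 bytes) are naturally aligned; for AES-192-XTS (48 bytes) the second
 * 24-byte half is moved to offset 32 and both halves are zero-padded to
 * 32 bytes, which is why enckey_len becomes 64 in that branch.
 */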
static int chcr_sha_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	int digestsize = crypto_ahash_digestsize(tfm);

	req_ctx->data_len = 0;
	req_ctx->reqlen = 0;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	copy_hash_init_values(req_ctx->partial_hash, digestsize);

	return 0;
}

static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_hmac_init(struct ahash_request *areq)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_sha_init(areq);
	req_ctx->data_len = bs;
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		if (digestsize == SHA224_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA256_DIGEST_SIZE);
		else if (digestsize == SHA384_DIGEST_SIZE)
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       SHA512_DIGEST_SIZE);
		else
			memcpy(req_ctx->partial_hash, hmacctx->ipad,
			       digestsize);
	}
	return 0;
}
static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
	unsigned int digestsize =
		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct chcr_ahash_req_ctx));
	hmacctx->base_hash = chcr_alloc_shash(digestsize);
	if (IS_ERR(hmacctx->base_hash))
		return PTR_ERR(hmacctx->base_hash);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);

	if (hmacctx->base_hash) {
		chcr_free_shash(hmacctx->base_hash);
		hmacctx->base_hash = NULL;
	}
}
inline void chcr_aead_common_exit(struct aead_request *req)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
}

static int chcr_aead_common_init(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;

	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;
	if (reqctx->op && req->cryptlen < authsize)
		goto err;
	if (reqctx->b0_len)
		reqctx->scratch_pad = reqctx->iv + IV;
	else
		reqctx->scratch_pad = NULL;

	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				  reqctx->op);
	if (error) {
		error = -ENOMEM;
		goto err;
	}

	return 0;
err:
	return error;
}
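/*
 * Layout note for the request IV area set up above: reqctx->iv holds the
 * 16-byte IV, and for CCM-style requests (b0_len != 0) the bytes right
 * after it serve as scratch space for the B0 block and the encoded AAD
 * length, which is why scratch_pad is simply reqctx->iv + IV here.
 */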
static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
				   int aadmax, int wrlen,
				   unsigned short op_type)
{
	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
	    dst_nents > MAX_DSGL_ENT ||
	    (req->assoclen > aadmax) ||
	    (wrlen > SGE_MAX_WR_LEN))
		return 1;
	return 0;
}

static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_request *subreq = aead_request_ctx_dma(req);

	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
	aead_request_set_callback(subreq, req->base.flags,
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	return op_type ? crypto_aead_decrypt(subreq) :
		crypto_aead_encrypt(subreq);
}
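/*
 * The fallback above repurposes the request context as a sub-request aimed
 * at the software AEAD allocated in chcr_aead_cra_init(), so requests the
 * hardware cannot take (oversized AAD, too many destination SG entries, an
 * over-long work request) are serviced by the kernel's generic
 * implementation with the same callback and completion semantics as the
 * hardware path.
 */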
2440 static struct sk_buff
*create_authenc_wr(struct aead_request
*req
,
2444 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2445 struct chcr_context
*ctx
= a_ctx(tfm
);
2446 struct uld_ctx
*u_ctx
= ULD_CTX(ctx
);
2447 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(ctx
);
2448 struct chcr_authenc_ctx
*actx
= AUTHENC_CTX(aeadctx
);
2449 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx_dma(req
);
2450 struct sk_buff
*skb
= NULL
;
2451 struct chcr_wr
*chcr_req
;
2452 struct cpl_rx_phys_dsgl
*phys_cpl
;
2453 struct ulptx_sgl
*ulptx
;
2454 unsigned int transhdr_len
;
2455 unsigned int dst_size
= 0, temp
, subtype
= get_aead_subtype(tfm
);
2456 unsigned int kctx_len
= 0, dnents
, snents
;
2457 unsigned int authsize
= crypto_aead_authsize(tfm
);
2458 int error
= -EINVAL
;
2461 gfp_t flags
= req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
2463 struct adapter
*adap
= padap(ctx
->dev
);
2464 unsigned int rx_channel_id
= reqctx
->rxqidx
/ ctx
->rxq_perchan
;
2466 rx_channel_id
= cxgb4_port_e2cchan(u_ctx
->lldi
.ports
[rx_channel_id
]);
2467 if (req
->cryptlen
== 0)
2471 error
= chcr_aead_common_init(req
);
2473 return ERR_PTR(error
);
2475 if (subtype
== CRYPTO_ALG_SUB_TYPE_CBC_NULL
||
2476 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
) {
2479 dnents
= sg_nents_xlen(req
->dst
, req
->assoclen
+ req
->cryptlen
+
2480 (reqctx
->op
? -authsize
: authsize
), CHCR_DST_SG_SIZE
, 0);
2481 dnents
+= MIN_AUTH_SG
; // For IV
2482 snents
= sg_nents_xlen(req
->src
, req
->assoclen
+ req
->cryptlen
,
2483 CHCR_SRC_SG_SIZE
, 0);
2484 dst_size
= get_space_for_phys_dsgl(dnents
);
2485 kctx_len
= (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx
->key_ctx_hdr
)) << 4)
2486 - sizeof(chcr_req
->key_ctx
);
2487 transhdr_len
= CIPHER_TRANSHDR_SIZE(kctx_len
, dst_size
);
2488 reqctx
->imm
= (transhdr_len
+ req
->assoclen
+ req
->cryptlen
) <
2490 temp
= reqctx
->imm
? roundup(req
->assoclen
+ req
->cryptlen
, 16)
2491 : (sgl_len(snents
) * 8);
2492 transhdr_len
+= temp
;
2493 transhdr_len
= roundup(transhdr_len
, 16);
2495 if (chcr_aead_need_fallback(req
, dnents
, T6_MAX_AAD_SIZE
,
2496 transhdr_len
, reqctx
->op
)) {
2497 atomic_inc(&adap
->chcr_stats
.fallback
);
2498 chcr_aead_common_exit(req
);
2499 return ERR_PTR(chcr_aead_fallback(req
, reqctx
->op
));
2501 skb
= alloc_skb(transhdr_len
, flags
);
2507 chcr_req
= __skb_put_zero(skb
, transhdr_len
);
2509 temp
= (reqctx
->op
== CHCR_ENCRYPT_OP
) ? 0 : authsize
;
2512 * Input order is AAD,IV and Payload. where IV should be included as
2513 * the part of authdata. All other fields should be filled according
2514 * to the hardware spec
2516 chcr_req
->sec_cpl
.op_ivinsrtofst
=
2517 FILL_SEC_CPL_OP_IVINSR(rx_channel_id
, 2, 1);
2518 chcr_req
->sec_cpl
.pldlen
= htonl(req
->assoclen
+ IV
+ req
->cryptlen
);
2519 chcr_req
->sec_cpl
.aadstart_cipherstop_hi
= FILL_SEC_CPL_CIPHERSTOP_HI(
2521 null
? 0 : IV
+ req
->assoclen
,
2522 req
->assoclen
+ IV
+ 1,
2523 (temp
& 0x1F0) >> 4);
2524 chcr_req
->sec_cpl
.cipherstop_lo_authinsert
= FILL_SEC_CPL_AUTHINSERT(
2526 null
? 0 : req
->assoclen
+ IV
+ 1,
2528 if (subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
||
2529 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_SHA
)
2530 temp
= CHCR_SCMD_CIPHER_MODE_AES_CTR
;
2532 temp
= CHCR_SCMD_CIPHER_MODE_AES_CBC
;
2533 chcr_req
->sec_cpl
.seqno_numivs
= FILL_SEC_CPL_SCMD0_SEQNO(reqctx
->op
,
2534 (reqctx
->op
== CHCR_ENCRYPT_OP
) ? 1 : 0,
2536 actx
->auth_mode
, aeadctx
->hmac_ctrl
,
2538 chcr_req
->sec_cpl
.ivgen_hdrlen
= FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2541 chcr_req
->key_ctx
.ctx_hdr
= aeadctx
->key_ctx_hdr
;
2542 if (reqctx
->op
== CHCR_ENCRYPT_OP
||
2543 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_SHA
||
2544 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
)
2545 memcpy(chcr_req
->key_ctx
.key
, aeadctx
->key
,
2546 aeadctx
->enckey_len
);
2548 memcpy(chcr_req
->key_ctx
.key
, actx
->dec_rrkey
,
2549 aeadctx
->enckey_len
);
2551 memcpy(chcr_req
->key_ctx
.key
+ roundup(aeadctx
->enckey_len
, 16),
2552 actx
->h_iopad
, kctx_len
- roundup(aeadctx
->enckey_len
, 16));
2553 phys_cpl
= (struct cpl_rx_phys_dsgl
*)((u8
*)(chcr_req
+ 1) + kctx_len
);
2554 ivptr
= (u8
*)(phys_cpl
+ 1) + dst_size
;
2555 ulptx
= (struct ulptx_sgl
*)(ivptr
+ IV
);
2556 if (subtype
== CRYPTO_ALG_SUB_TYPE_CTR_SHA
||
2557 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
) {
2558 memcpy(ivptr
, aeadctx
->nonce
, CTR_RFC3686_NONCE_SIZE
);
2559 memcpy(ivptr
+ CTR_RFC3686_NONCE_SIZE
, req
->iv
,
2560 CTR_RFC3686_IV_SIZE
);
2561 *(__be32
*)(ivptr
+ CTR_RFC3686_NONCE_SIZE
+
2562 CTR_RFC3686_IV_SIZE
) = cpu_to_be32(1);
2564 memcpy(ivptr
, req
->iv
, IV
);
2566 chcr_add_aead_dst_ent(req
, phys_cpl
, qid
);
2567 chcr_add_aead_src_ent(req
, ulptx
);
2568 atomic_inc(&adap
->chcr_stats
.cipher_rqst
);
2569 temp
= sizeof(struct cpl_rx_phys_dsgl
) + dst_size
+ IV
+
2570 kctx_len
+ (reqctx
->imm
? (req
->assoclen
+ req
->cryptlen
) : 0);
2571 create_wreq(a_ctx(tfm
), chcr_req
, &req
->base
, reqctx
->imm
, size
,
2572 transhdr_len
, temp
, 0);
2577 chcr_aead_common_exit(req
);
2579 return ERR_PTR(error
);
2582 int chcr_aead_dma_map(struct device
*dev
,
2583 struct aead_request
*req
,
2584 unsigned short op_type
)
2587 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx_dma(req
);
2588 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2589 unsigned int authsize
= crypto_aead_authsize(tfm
);
2590 int src_len
, dst_len
;
2592 /* calculate and handle src and dst sg length separately
2593 * for inplace and out-of place operations
2595 if (req
->src
== req
->dst
) {
2596 src_len
= req
->assoclen
+ req
->cryptlen
+ (op_type
?
2600 src_len
= req
->assoclen
+ req
->cryptlen
;
2601 dst_len
= req
->assoclen
+ req
->cryptlen
+ (op_type
?
2602 -authsize
: authsize
);
2605 if (!req
->cryptlen
|| !src_len
|| !dst_len
)
2607 reqctx
->iv_dma
= dma_map_single(dev
, reqctx
->iv
, (IV
+ reqctx
->b0_len
),
2609 if (dma_mapping_error(dev
, reqctx
->iv_dma
))
2612 reqctx
->b0_dma
= reqctx
->iv_dma
+ IV
;
2615 if (req
->src
== req
->dst
) {
2616 error
= dma_map_sg(dev
, req
->src
,
2617 sg_nents_for_len(req
->src
, src_len
),
2622 error
= dma_map_sg(dev
, req
->src
,
2623 sg_nents_for_len(req
->src
, src_len
),
2627 error
= dma_map_sg(dev
, req
->dst
,
2628 sg_nents_for_len(req
->dst
, dst_len
),
2631 dma_unmap_sg(dev
, req
->src
,
2632 sg_nents_for_len(req
->src
, src_len
),
2640 dma_unmap_single(dev
, reqctx
->iv_dma
, IV
, DMA_BIDIRECTIONAL
);
2644 void chcr_aead_dma_unmap(struct device
*dev
,
2645 struct aead_request
*req
,
2646 unsigned short op_type
)
2648 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx_dma(req
);
2649 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2650 unsigned int authsize
= crypto_aead_authsize(tfm
);
2651 int src_len
, dst_len
;
2653 /* calculate and handle src and dst sg length separately
2654 * for inplace and out-of place operations
2656 if (req
->src
== req
->dst
) {
2657 src_len
= req
->assoclen
+ req
->cryptlen
+ (op_type
?
2661 src_len
= req
->assoclen
+ req
->cryptlen
;
2662 dst_len
= req
->assoclen
+ req
->cryptlen
+ (op_type
?
2663 -authsize
: authsize
);
2666 if (!req
->cryptlen
|| !src_len
|| !dst_len
)
2669 dma_unmap_single(dev
, reqctx
->iv_dma
, (IV
+ reqctx
->b0_len
),
2671 if (req
->src
== req
->dst
) {
2672 dma_unmap_sg(dev
, req
->src
,
2673 sg_nents_for_len(req
->src
, src_len
),
2676 dma_unmap_sg(dev
, req
->src
,
2677 sg_nents_for_len(req
->src
, src_len
),
2679 dma_unmap_sg(dev
, req
->dst
,
2680 sg_nents_for_len(req
->dst
, dst_len
),
2685 void chcr_add_aead_src_ent(struct aead_request
*req
,
2686 struct ulptx_sgl
*ulptx
)
2688 struct ulptx_walk ulp_walk
;
2689 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx_dma(req
);
2692 u8
*buf
= (u8
*)ulptx
;
2694 if (reqctx
->b0_len
) {
2695 memcpy(buf
, reqctx
->scratch_pad
, reqctx
->b0_len
);
2696 buf
+= reqctx
->b0_len
;
2698 sg_pcopy_to_buffer(req
->src
, sg_nents(req
->src
),
2699 buf
, req
->cryptlen
+ req
->assoclen
, 0);
2701 ulptx_walk_init(&ulp_walk
, ulptx
);
2703 ulptx_walk_add_page(&ulp_walk
, reqctx
->b0_len
,
2705 ulptx_walk_add_sg(&ulp_walk
, req
->src
, req
->cryptlen
+
2707 ulptx_walk_end(&ulp_walk
);
2711 void chcr_add_aead_dst_ent(struct aead_request
*req
,
2712 struct cpl_rx_phys_dsgl
*phys_cpl
,
2715 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx_dma(req
);
2716 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2717 struct dsgl_walk dsgl_walk
;
2718 unsigned int authsize
= crypto_aead_authsize(tfm
);
2719 struct chcr_context
*ctx
= a_ctx(tfm
);
2720 struct uld_ctx
*u_ctx
= ULD_CTX(ctx
);
2722 unsigned int rx_channel_id
= reqctx
->rxqidx
/ ctx
->rxq_perchan
;
2724 rx_channel_id
= cxgb4_port_e2cchan(u_ctx
->lldi
.ports
[rx_channel_id
]);
2725 dsgl_walk_init(&dsgl_walk
, phys_cpl
);
2726 dsgl_walk_add_page(&dsgl_walk
, IV
+ reqctx
->b0_len
, reqctx
->iv_dma
);
2727 temp
= req
->assoclen
+ req
->cryptlen
+
2728 (reqctx
->op
? -authsize
: authsize
);
2729 dsgl_walk_add_sg(&dsgl_walk
, req
->dst
, temp
, 0);
2730 dsgl_walk_end(&dsgl_walk
, qid
, rx_channel_id
);
2733 void chcr_add_cipher_src_ent(struct skcipher_request
*req
,
2735 struct cipher_wr_param
*wrparam
)
2737 struct ulptx_walk ulp_walk
;
2738 struct chcr_skcipher_req_ctx
*reqctx
= skcipher_request_ctx(req
);
2741 memcpy(buf
, reqctx
->iv
, IV
);
2744 sg_pcopy_to_buffer(req
->src
, sg_nents(req
->src
),
2745 buf
, wrparam
->bytes
, reqctx
->processed
);
2747 ulptx_walk_init(&ulp_walk
, (struct ulptx_sgl
*)buf
);
2748 ulptx_walk_add_sg(&ulp_walk
, reqctx
->srcsg
, wrparam
->bytes
,
2750 reqctx
->srcsg
= ulp_walk
.last_sg
;
2751 reqctx
->src_ofst
= ulp_walk
.last_sg_len
;
2752 ulptx_walk_end(&ulp_walk
);
2756 void chcr_add_cipher_dst_ent(struct skcipher_request
*req
,
2757 struct cpl_rx_phys_dsgl
*phys_cpl
,
2758 struct cipher_wr_param
*wrparam
,
2761 struct chcr_skcipher_req_ctx
*reqctx
= skcipher_request_ctx(req
);
2762 struct crypto_skcipher
*tfm
= crypto_skcipher_reqtfm(wrparam
->req
);
2763 struct chcr_context
*ctx
= c_ctx(tfm
);
2764 struct uld_ctx
*u_ctx
= ULD_CTX(ctx
);
2765 struct dsgl_walk dsgl_walk
;
2766 unsigned int rx_channel_id
= reqctx
->rxqidx
/ ctx
->rxq_perchan
;
2768 rx_channel_id
= cxgb4_port_e2cchan(u_ctx
->lldi
.ports
[rx_channel_id
]);
2769 dsgl_walk_init(&dsgl_walk
, phys_cpl
);
2770 dsgl_walk_add_sg(&dsgl_walk
, reqctx
->dstsg
, wrparam
->bytes
,
2772 reqctx
->dstsg
= dsgl_walk
.last_sg
;
2773 reqctx
->dst_ofst
= dsgl_walk
.last_sg_len
;
2774 dsgl_walk_end(&dsgl_walk
, qid
, rx_channel_id
);
2777 void chcr_add_hash_src_ent(struct ahash_request
*req
,
2778 struct ulptx_sgl
*ulptx
,
2779 struct hash_wr_param
*param
)
2781 struct ulptx_walk ulp_walk
;
2782 struct chcr_ahash_req_ctx
*reqctx
= ahash_request_ctx(req
);
2784 if (reqctx
->hctx_wr
.imm
) {
2785 u8
*buf
= (u8
*)ulptx
;
2787 if (param
->bfr_len
) {
2788 memcpy(buf
, reqctx
->reqbfr
, param
->bfr_len
);
2789 buf
+= param
->bfr_len
;
2792 sg_pcopy_to_buffer(reqctx
->hctx_wr
.srcsg
,
2793 sg_nents(reqctx
->hctx_wr
.srcsg
), buf
,
2796 ulptx_walk_init(&ulp_walk
, ulptx
);
2798 ulptx_walk_add_page(&ulp_walk
, param
->bfr_len
,
2799 reqctx
->hctx_wr
.dma_addr
);
2800 ulptx_walk_add_sg(&ulp_walk
, reqctx
->hctx_wr
.srcsg
,
2801 param
->sg_len
, reqctx
->hctx_wr
.src_ofst
);
2802 reqctx
->hctx_wr
.srcsg
= ulp_walk
.last_sg
;
2803 reqctx
->hctx_wr
.src_ofst
= ulp_walk
.last_sg_len
;
2804 ulptx_walk_end(&ulp_walk
);
int chcr_hash_dma_map(struct device *dev,
		      struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	int error = 0;

	if (!req->nbytes)
		return 0;
	error = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_TO_DEVICE);
	if (!error)
		return -ENOMEM;
	req_ctx->hctx_wr.is_sg_map = 1;
	return 0;
}

void chcr_hash_dma_unmap(struct device *dev,
			 struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return;

	dma_unmap_sg(dev, req->src, sg_nents(req->src),
		     DMA_TO_DEVICE);
	req_ctx->hctx_wr.is_sg_map = 0;
}
2838 int chcr_cipher_dma_map(struct device
*dev
,
2839 struct skcipher_request
*req
)
2843 if (req
->src
== req
->dst
) {
2844 error
= dma_map_sg(dev
, req
->src
, sg_nents(req
->src
),
2849 error
= dma_map_sg(dev
, req
->src
, sg_nents(req
->src
),
2853 error
= dma_map_sg(dev
, req
->dst
, sg_nents(req
->dst
),
2856 dma_unmap_sg(dev
, req
->src
, sg_nents(req
->src
),
2867 void chcr_cipher_dma_unmap(struct device
*dev
,
2868 struct skcipher_request
*req
)
2870 if (req
->src
== req
->dst
) {
2871 dma_unmap_sg(dev
, req
->src
, sg_nents(req
->src
),
2874 dma_unmap_sg(dev
, req
->src
, sg_nents(req
->src
),
2876 dma_unmap_sg(dev
, req
->dst
, sg_nents(req
->dst
),
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

static int generate_b0(struct aead_request *req, u8 *ivptr,
		       unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, ivptr, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);

	return rc;
}

static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}
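/*
 * CCM B0 layout produced by generate_b0() above (per RFC 3610 /
 * NIST SP 800-38C), noted here for reference; L' = iv[0] and q = L' + 1 is
 * the size of the length field, so 15 - q nonce bytes follow the flags:
 *
 *	byte  0       : flags = 64*Adata | 8*((tag_len - 2) / 2) | L'
 *	bytes 1..15-q : nonce (copied from the formatted IV)
 *	bytes 16-q..15: message length, big endian, q bytes (set_msg_len())
 */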
static int ccm_format_packet(struct aead_request *req,
			     u8 *ivptr,
			     unsigned int sub_type,
			     unsigned short op_type,
			     unsigned int assoclen)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	int rc = 0;

	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		*ivptr = 3;
		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
		memcpy(ivptr + 4, req->iv, 8);
		memset(ivptr + 12, 0, 4);
	} else {
		memcpy(ivptr, req->iv, 16);
	}
	if (assoclen)
		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);

	rc = generate_b0(req, ivptr, op_type);
	/* zero the ctr value */
	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
	return rc;
}
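/*
 * Counter-block layout built above for the RFC 4309 (IPsec ESP) case,
 * noted here for reference:
 *
 *	ivptr[0]      = 3	(L' = 3, i.e. a 4-byte block counter)
 *	ivptr[1..3]   = salt	(last 3 key bytes, see rfc4309 setkey)
 *	ivptr[4..11]  = 8-byte per-packet IV from req->iv
 *	ivptr[12..15] = 0	(counter, zeroed again after generate_b0())
 *
 * For plain ccm(aes) the caller-supplied 16-byte IV is used as-is.
 */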
2965 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu
*sec_cpl
,
2966 unsigned int dst_size
,
2967 struct aead_request
*req
,
2968 unsigned short op_type
)
2970 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
2971 struct chcr_context
*ctx
= a_ctx(tfm
);
2972 struct uld_ctx
*u_ctx
= ULD_CTX(ctx
);
2973 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(ctx
);
2974 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx_dma(req
);
2975 unsigned int cipher_mode
= CHCR_SCMD_CIPHER_MODE_AES_CCM
;
2976 unsigned int mac_mode
= CHCR_SCMD_AUTH_MODE_CBCMAC
;
2977 unsigned int rx_channel_id
= reqctx
->rxqidx
/ ctx
->rxq_perchan
;
2978 unsigned int ccm_xtra
;
2979 unsigned int tag_offset
= 0, auth_offset
= 0;
2980 unsigned int assoclen
;
2982 rx_channel_id
= cxgb4_port_e2cchan(u_ctx
->lldi
.ports
[rx_channel_id
]);
2984 if (get_aead_subtype(tfm
) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
)
2985 assoclen
= req
->assoclen
- 8;
2987 assoclen
= req
->assoclen
;
2988 ccm_xtra
= CCM_B0_SIZE
+
2989 ((assoclen
) ? CCM_AAD_FIELD_SIZE
: 0);
2991 auth_offset
= req
->cryptlen
?
2992 (req
->assoclen
+ IV
+ 1 + ccm_xtra
) : 0;
2993 if (op_type
== CHCR_DECRYPT_OP
) {
2994 if (crypto_aead_authsize(tfm
) != req
->cryptlen
)
2995 tag_offset
= crypto_aead_authsize(tfm
);
3000 sec_cpl
->op_ivinsrtofst
= FILL_SEC_CPL_OP_IVINSR(rx_channel_id
, 2, 1);
3002 htonl(req
->assoclen
+ IV
+ req
->cryptlen
+ ccm_xtra
);
3003 /* For CCM there wil be b0 always. So AAD start will be 1 always */
3004 sec_cpl
->aadstart_cipherstop_hi
= FILL_SEC_CPL_CIPHERSTOP_HI(
3005 1 + IV
, IV
+ assoclen
+ ccm_xtra
,
3006 req
->assoclen
+ IV
+ 1 + ccm_xtra
, 0);
3008 sec_cpl
->cipherstop_lo_authinsert
= FILL_SEC_CPL_AUTHINSERT(0,
3009 auth_offset
, tag_offset
,
3010 (op_type
== CHCR_ENCRYPT_OP
) ? 0 :
3011 crypto_aead_authsize(tfm
));
3012 sec_cpl
->seqno_numivs
= FILL_SEC_CPL_SCMD0_SEQNO(op_type
,
3013 (op_type
== CHCR_ENCRYPT_OP
) ? 0 : 1,
3014 cipher_mode
, mac_mode
,
3015 aeadctx
->hmac_ctrl
, IV
>> 1);
3017 sec_cpl
->ivgen_hdrlen
= FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
static int aead_ccm_validate_input(unsigned short op_type,
				   struct aead_request *req,
				   struct chcr_aead_ctx *aeadctx,
				   unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	return 0;
}
3041 static struct sk_buff
*create_aead_ccm_wr(struct aead_request
*req
,
3045 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
3046 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3047 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx_dma(req
);
3048 struct sk_buff
*skb
= NULL
;
3049 struct chcr_wr
*chcr_req
;
3050 struct cpl_rx_phys_dsgl
*phys_cpl
;
3051 struct ulptx_sgl
*ulptx
;
3052 unsigned int transhdr_len
;
3053 unsigned int dst_size
= 0, kctx_len
, dnents
, temp
, snents
;
3054 unsigned int sub_type
, assoclen
= req
->assoclen
;
3055 unsigned int authsize
= crypto_aead_authsize(tfm
);
3056 int error
= -EINVAL
;
3058 gfp_t flags
= req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
3060 struct adapter
*adap
= padap(a_ctx(tfm
)->dev
);
3062 sub_type
= get_aead_subtype(tfm
);
3063 if (sub_type
== CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
)
3065 reqctx
->b0_len
= CCM_B0_SIZE
+ (assoclen
? CCM_AAD_FIELD_SIZE
: 0);
3066 error
= chcr_aead_common_init(req
);
3068 return ERR_PTR(error
);
3070 error
= aead_ccm_validate_input(reqctx
->op
, req
, aeadctx
, sub_type
);
3073 dnents
= sg_nents_xlen(req
->dst
, req
->assoclen
+ req
->cryptlen
3074 + (reqctx
->op
? -authsize
: authsize
),
3075 CHCR_DST_SG_SIZE
, 0);
3076 dnents
+= MIN_CCM_SG
; // For IV and B0
3077 dst_size
= get_space_for_phys_dsgl(dnents
);
3078 snents
= sg_nents_xlen(req
->src
, req
->assoclen
+ req
->cryptlen
,
3079 CHCR_SRC_SG_SIZE
, 0);
3080 snents
+= MIN_CCM_SG
; //For B0
3081 kctx_len
= roundup(aeadctx
->enckey_len
, 16) * 2;
3082 transhdr_len
= CIPHER_TRANSHDR_SIZE(kctx_len
, dst_size
);
3083 reqctx
->imm
= (transhdr_len
+ req
->assoclen
+ req
->cryptlen
+
3084 reqctx
->b0_len
) <= SGE_MAX_WR_LEN
;
3085 temp
= reqctx
->imm
? roundup(req
->assoclen
+ req
->cryptlen
+
3086 reqctx
->b0_len
, 16) :
3087 (sgl_len(snents
) * 8);
3088 transhdr_len
+= temp
;
3089 transhdr_len
= roundup(transhdr_len
, 16);
3091 if (chcr_aead_need_fallback(req
, dnents
, T6_MAX_AAD_SIZE
-
3092 reqctx
->b0_len
, transhdr_len
, reqctx
->op
)) {
3093 atomic_inc(&adap
->chcr_stats
.fallback
);
3094 chcr_aead_common_exit(req
);
3095 return ERR_PTR(chcr_aead_fallback(req
, reqctx
->op
));
3097 skb
= alloc_skb(transhdr_len
, flags
);
3104 chcr_req
= __skb_put_zero(skb
, transhdr_len
);
3106 fill_sec_cpl_for_aead(&chcr_req
->sec_cpl
, dst_size
, req
, reqctx
->op
);
3108 chcr_req
->key_ctx
.ctx_hdr
= aeadctx
->key_ctx_hdr
;
3109 memcpy(chcr_req
->key_ctx
.key
, aeadctx
->key
, aeadctx
->enckey_len
);
3110 memcpy(chcr_req
->key_ctx
.key
+ roundup(aeadctx
->enckey_len
, 16),
3111 aeadctx
->key
, aeadctx
->enckey_len
);
3113 phys_cpl
= (struct cpl_rx_phys_dsgl
*)((u8
*)(chcr_req
+ 1) + kctx_len
);
3114 ivptr
= (u8
*)(phys_cpl
+ 1) + dst_size
;
3115 ulptx
= (struct ulptx_sgl
*)(ivptr
+ IV
);
3116 error
= ccm_format_packet(req
, ivptr
, sub_type
, reqctx
->op
, assoclen
);
3119 chcr_add_aead_dst_ent(req
, phys_cpl
, qid
);
3120 chcr_add_aead_src_ent(req
, ulptx
);
3122 atomic_inc(&adap
->chcr_stats
.aead_rqst
);
3123 temp
= sizeof(struct cpl_rx_phys_dsgl
) + dst_size
+ IV
+
3124 kctx_len
+ (reqctx
->imm
? (req
->assoclen
+ req
->cryptlen
+
3125 reqctx
->b0_len
) : 0);
3126 create_wreq(a_ctx(tfm
), chcr_req
, &req
->base
, reqctx
->imm
, 0,
3127 transhdr_len
, temp
, 0);
3134 chcr_aead_common_exit(req
);
3135 return ERR_PTR(error
);
3138 static struct sk_buff
*create_gcm_wr(struct aead_request
*req
,
3142 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
3143 struct chcr_context
*ctx
= a_ctx(tfm
);
3144 struct uld_ctx
*u_ctx
= ULD_CTX(ctx
);
3145 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(ctx
);
3146 struct chcr_aead_reqctx
*reqctx
= aead_request_ctx_dma(req
);
3147 struct sk_buff
*skb
= NULL
;
3148 struct chcr_wr
*chcr_req
;
3149 struct cpl_rx_phys_dsgl
*phys_cpl
;
3150 struct ulptx_sgl
*ulptx
;
3151 unsigned int transhdr_len
, dnents
= 0, snents
;
3152 unsigned int dst_size
= 0, temp
= 0, kctx_len
, assoclen
= req
->assoclen
;
3153 unsigned int authsize
= crypto_aead_authsize(tfm
);
3154 int error
= -EINVAL
;
3156 gfp_t flags
= req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
3158 struct adapter
*adap
= padap(ctx
->dev
);
3159 unsigned int rx_channel_id
= reqctx
->rxqidx
/ ctx
->rxq_perchan
;
3161 rx_channel_id
= cxgb4_port_e2cchan(u_ctx
->lldi
.ports
[rx_channel_id
]);
3162 if (get_aead_subtype(tfm
) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
)
3163 assoclen
= req
->assoclen
- 8;
3166 error
= chcr_aead_common_init(req
);
3168 return ERR_PTR(error
);
3169 dnents
= sg_nents_xlen(req
->dst
, req
->assoclen
+ req
->cryptlen
+
3170 (reqctx
->op
? -authsize
: authsize
),
3171 CHCR_DST_SG_SIZE
, 0);
3172 snents
= sg_nents_xlen(req
->src
, req
->assoclen
+ req
->cryptlen
,
3173 CHCR_SRC_SG_SIZE
, 0);
3174 dnents
+= MIN_GCM_SG
; // For IV
3175 dst_size
= get_space_for_phys_dsgl(dnents
);
3176 kctx_len
= roundup(aeadctx
->enckey_len
, 16) + AEAD_H_SIZE
;
3177 transhdr_len
= CIPHER_TRANSHDR_SIZE(kctx_len
, dst_size
);
3178 reqctx
->imm
= (transhdr_len
+ req
->assoclen
+ req
->cryptlen
) <=
3180 temp
= reqctx
->imm
? roundup(req
->assoclen
+ req
->cryptlen
, 16) :
3181 (sgl_len(snents
) * 8);
3182 transhdr_len
+= temp
;
3183 transhdr_len
= roundup(transhdr_len
, 16);
3184 if (chcr_aead_need_fallback(req
, dnents
, T6_MAX_AAD_SIZE
,
3185 transhdr_len
, reqctx
->op
)) {
3187 atomic_inc(&adap
->chcr_stats
.fallback
);
3188 chcr_aead_common_exit(req
);
3189 return ERR_PTR(chcr_aead_fallback(req
, reqctx
->op
));
3191 skb
= alloc_skb(transhdr_len
, flags
);
3197 chcr_req
= __skb_put_zero(skb
, transhdr_len
);
3199 //Offset of tag from end
3200 temp
= (reqctx
->op
== CHCR_ENCRYPT_OP
) ? 0 : authsize
;
3201 chcr_req
->sec_cpl
.op_ivinsrtofst
= FILL_SEC_CPL_OP_IVINSR(
3202 rx_channel_id
, 2, 1);
3203 chcr_req
->sec_cpl
.pldlen
=
3204 htonl(req
->assoclen
+ IV
+ req
->cryptlen
);
3205 chcr_req
->sec_cpl
.aadstart_cipherstop_hi
= FILL_SEC_CPL_CIPHERSTOP_HI(
3206 assoclen
? 1 + IV
: 0,
3207 assoclen
? IV
+ assoclen
: 0,
3208 req
->assoclen
+ IV
+ 1, 0);
3209 chcr_req
->sec_cpl
.cipherstop_lo_authinsert
=
3210 FILL_SEC_CPL_AUTHINSERT(0, req
->assoclen
+ IV
+ 1,
3212 chcr_req
->sec_cpl
.seqno_numivs
=
3213 FILL_SEC_CPL_SCMD0_SEQNO(reqctx
->op
, (reqctx
->op
==
3214 CHCR_ENCRYPT_OP
) ? 1 : 0,
3215 CHCR_SCMD_CIPHER_MODE_AES_GCM
,
3216 CHCR_SCMD_AUTH_MODE_GHASH
,
3217 aeadctx
->hmac_ctrl
, IV
>> 1);
3218 chcr_req
->sec_cpl
.ivgen_hdrlen
= FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3220 chcr_req
->key_ctx
.ctx_hdr
= aeadctx
->key_ctx_hdr
;
3221 memcpy(chcr_req
->key_ctx
.key
, aeadctx
->key
, aeadctx
->enckey_len
);
3222 memcpy(chcr_req
->key_ctx
.key
+ roundup(aeadctx
->enckey_len
, 16),
3223 GCM_CTX(aeadctx
)->ghash_h
, AEAD_H_SIZE
);
3225 phys_cpl
= (struct cpl_rx_phys_dsgl
*)((u8
*)(chcr_req
+ 1) + kctx_len
);
3226 ivptr
= (u8
*)(phys_cpl
+ 1) + dst_size
;
3227 /* prepare a 16 byte iv */
3228 /* S A L T | IV | 0x00000001 */
3229 if (get_aead_subtype(tfm
) ==
3230 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
) {
3231 memcpy(ivptr
, aeadctx
->salt
, 4);
3232 memcpy(ivptr
+ 4, req
->iv
, GCM_RFC4106_IV_SIZE
);
3234 memcpy(ivptr
, req
->iv
, GCM_AES_IV_SIZE
);
3236 put_unaligned_be32(0x01, &ivptr
[12]);
3237 ulptx
= (struct ulptx_sgl
*)(ivptr
+ 16);
3239 chcr_add_aead_dst_ent(req
, phys_cpl
, qid
);
3240 chcr_add_aead_src_ent(req
, ulptx
);
3241 atomic_inc(&adap
->chcr_stats
.aead_rqst
);
3242 temp
= sizeof(struct cpl_rx_phys_dsgl
) + dst_size
+ IV
+
3243 kctx_len
+ (reqctx
->imm
? (req
->assoclen
+ req
->cryptlen
) : 0);
3244 create_wreq(a_ctx(tfm
), chcr_req
, &req
->base
, reqctx
->imm
, size
,
3245 transhdr_len
, temp
, reqctx
->verify
);
3250 chcr_aead_common_exit(req
);
3251 return ERR_PTR(error
);
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize_dma(
		tfm, max(sizeof(struct chcr_aead_reqctx),
			 sizeof(struct aead_request) +
			 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}

static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* SHA-1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
	 * does not hold for SHA-1, so the authsize == 12 check must come
	 * before the authsize == (maxauth >> 1) check.
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
3328 static int chcr_gcm_setauthsize(struct crypto_aead
*tfm
, unsigned int authsize
)
3330 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3334 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL1
;
3335 aeadctx
->mayverify
= VERIFY_HW
;
3338 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_DIV2
;
3339 aeadctx
->mayverify
= VERIFY_HW
;
3342 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT
;
3343 aeadctx
->mayverify
= VERIFY_HW
;
3346 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL3
;
3347 aeadctx
->mayverify
= VERIFY_HW
;
3350 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_NO_TRUNC
;
3351 aeadctx
->mayverify
= VERIFY_HW
;
3355 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_NO_TRUNC
;
3356 aeadctx
->mayverify
= VERIFY_SW
;
3361 return crypto_aead_setauthsize(aeadctx
->sw_cipher
, authsize
);
3364 static int chcr_4106_4309_setauthsize(struct crypto_aead
*tfm
,
3365 unsigned int authsize
)
3367 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3371 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_DIV2
;
3372 aeadctx
->mayverify
= VERIFY_HW
;
3375 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT
;
3376 aeadctx
->mayverify
= VERIFY_HW
;
3379 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_NO_TRUNC
;
3380 aeadctx
->mayverify
= VERIFY_HW
;
3385 return crypto_aead_setauthsize(aeadctx
->sw_cipher
, authsize
);
3388 static int chcr_ccm_setauthsize(struct crypto_aead
*tfm
,
3389 unsigned int authsize
)
3391 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(tfm
));
3395 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL1
;
3396 aeadctx
->mayverify
= VERIFY_HW
;
3399 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL2
;
3400 aeadctx
->mayverify
= VERIFY_HW
;
3403 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_DIV2
;
3404 aeadctx
->mayverify
= VERIFY_HW
;
3407 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366
;
3408 aeadctx
->mayverify
= VERIFY_HW
;
3411 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT
;
3412 aeadctx
->mayverify
= VERIFY_HW
;
3415 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_PL3
;
3416 aeadctx
->mayverify
= VERIFY_HW
;
3419 aeadctx
->hmac_ctrl
= CHCR_SCMD_HMAC_CTRL_NO_TRUNC
;
3420 aeadctx
->mayverify
= VERIFY_HW
;
3425 return crypto_aead_setauthsize(aeadctx
->sw_cipher
, authsize
);
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}

static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}

static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	keylen -= 3;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (error)
		return error;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;
	struct crypto_aes_ctx aes;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
	memzero_explicit(&aes, sizeof(aes));
out:
	return ret;
}
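/*
 * The hash subkey derived above is the standard GCM H = AES-K(0^128): one
 * all-zero block encrypted with the cipher key, stored after the AES key in
 * the key context so the hardware can run GHASH.  A minimal software
 * equivalent (sketch only, mirroring the code above):
 *
 *	struct crypto_aes_ctx aes;
 *	u8 h[AES_BLOCK_SIZE] = { 0 };
 *
 *	aes_expandkey(&aes, key, keylen);
 *	aes_encrypt(&aes, h, h);	// h now holds the GHASH subkey
 *	memzero_explicit(&aes, sizeof(aes));
 */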
3553 static int chcr_authenc_setkey(struct crypto_aead
*authenc
, const u8
*key
,
3554 unsigned int keylen
)
3556 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(authenc
));
3557 struct chcr_authenc_ctx
*actx
= AUTHENC_CTX(aeadctx
);
3558 /* it contains auth and cipher key both*/
3559 struct crypto_authenc_keys keys
;
3560 unsigned int bs
, subtype
;
3561 unsigned int max_authsize
= crypto_aead_alg(authenc
)->maxauthsize
;
3562 int err
= 0, i
, key_ctx_len
= 0;
3563 unsigned char ck_size
= 0;
3564 unsigned char pad
[CHCR_HASH_MAX_BLOCK_SIZE_128
] = { 0 };
3565 struct crypto_shash
*base_hash
= ERR_PTR(-EINVAL
);
3566 struct algo_param param
;
3570 crypto_aead_clear_flags(aeadctx
->sw_cipher
, CRYPTO_TFM_REQ_MASK
);
3571 crypto_aead_set_flags(aeadctx
->sw_cipher
, crypto_aead_get_flags(authenc
)
3572 & CRYPTO_TFM_REQ_MASK
);
3573 err
= crypto_aead_setkey(aeadctx
->sw_cipher
, key
, keylen
);
3577 if (crypto_authenc_extractkeys(&keys
, key
, keylen
) != 0)
3580 if (get_alg_config(¶m
, max_authsize
)) {
3581 pr_err("Unsupported digest size\n");
3584 subtype
= get_aead_subtype(authenc
);
3585 if (subtype
== CRYPTO_ALG_SUB_TYPE_CTR_SHA
||
3586 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
) {
3587 if (keys
.enckeylen
< CTR_RFC3686_NONCE_SIZE
)
3589 memcpy(aeadctx
->nonce
, keys
.enckey
+ (keys
.enckeylen
3590 - CTR_RFC3686_NONCE_SIZE
), CTR_RFC3686_NONCE_SIZE
);
3591 keys
.enckeylen
-= CTR_RFC3686_NONCE_SIZE
;
3593 if (keys
.enckeylen
== AES_KEYSIZE_128
) {
3594 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_128
;
3595 } else if (keys
.enckeylen
== AES_KEYSIZE_192
) {
3596 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_192
;
3597 } else if (keys
.enckeylen
== AES_KEYSIZE_256
) {
3598 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_256
;
3600 pr_err("Unsupported cipher key\n");
3604 /* Copy only encryption key. We use authkey to generate h(ipad) and
3605 * h(opad) so authkey is not needed again. authkeylen size have the
3606 * size of the hash digest size.
3608 memcpy(aeadctx
->key
, keys
.enckey
, keys
.enckeylen
);
3609 aeadctx
->enckey_len
= keys
.enckeylen
;
3610 if (subtype
== CRYPTO_ALG_SUB_TYPE_CBC_SHA
||
3611 subtype
== CRYPTO_ALG_SUB_TYPE_CBC_NULL
) {
3613 get_aes_decrypt_key(actx
->dec_rrkey
, aeadctx
->key
,
3614 aeadctx
->enckey_len
<< 3);
3616 base_hash
= chcr_alloc_shash(max_authsize
);
3617 if (IS_ERR(base_hash
)) {
3618 pr_err("Base driver cannot be loaded\n");
3622 SHASH_DESC_ON_STACK(shash
, base_hash
);
3624 shash
->tfm
= base_hash
;
3625 bs
= crypto_shash_blocksize(base_hash
);
3626 align
= KEYCTX_ALIGN_PAD(max_authsize
);
3627 o_ptr
= actx
->h_iopad
+ param
.result_size
+ align
;
3629 if (keys
.authkeylen
> bs
) {
3630 err
= crypto_shash_digest(shash
, keys
.authkey
,
3634 pr_err("Base driver cannot be loaded\n");
3637 keys
.authkeylen
= max_authsize
;
3639 memcpy(o_ptr
, keys
.authkey
, keys
.authkeylen
);
3641 /* Compute the ipad-digest*/
3642 memset(pad
+ keys
.authkeylen
, 0, bs
- keys
.authkeylen
);
3643 memcpy(pad
, o_ptr
, keys
.authkeylen
);
3644 for (i
= 0; i
< bs
>> 2; i
++)
3645 *((unsigned int *)pad
+ i
) ^= IPAD_DATA
;
3647 if (chcr_compute_partial_hash(shash
, pad
, actx
->h_iopad
,
3650 /* Compute the opad-digest */
3651 memset(pad
+ keys
.authkeylen
, 0, bs
- keys
.authkeylen
);
3652 memcpy(pad
, o_ptr
, keys
.authkeylen
);
3653 for (i
= 0; i
< bs
>> 2; i
++)
3654 *((unsigned int *)pad
+ i
) ^= OPAD_DATA
;
3656 if (chcr_compute_partial_hash(shash
, pad
, o_ptr
, max_authsize
))
3659 /* convert the ipad and opad digest to network order */
3660 chcr_change_order(actx
->h_iopad
, param
.result_size
);
3661 chcr_change_order(o_ptr
, param
.result_size
);
3662 key_ctx_len
= sizeof(struct _key_ctx
) +
3663 roundup(keys
.enckeylen
, 16) +
3664 (param
.result_size
+ align
) * 2;
3665 aeadctx
->key_ctx_hdr
= FILL_KEY_CTX_HDR(ck_size
, param
.mk_size
,
3666 0, 1, key_ctx_len
>> 4);
3667 actx
->auth_mode
= param
.auth_mode
;
3668 chcr_free_shash(base_hash
);
3670 memzero_explicit(&keys
, sizeof(keys
));
3674 aeadctx
->enckey_len
= 0;
3675 memzero_explicit(&keys
, sizeof(keys
));
3676 if (!IS_ERR(base_hash
))
3677 chcr_free_shash(base_hash
);
3681 static int chcr_aead_digest_null_setkey(struct crypto_aead
*authenc
,
3682 const u8
*key
, unsigned int keylen
)
3684 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(a_ctx(authenc
));
3685 struct chcr_authenc_ctx
*actx
= AUTHENC_CTX(aeadctx
);
3686 struct crypto_authenc_keys keys
;
3688 /* it contains auth and cipher key both*/
3689 unsigned int subtype
;
3690 int key_ctx_len
= 0;
3691 unsigned char ck_size
= 0;
3693 crypto_aead_clear_flags(aeadctx
->sw_cipher
, CRYPTO_TFM_REQ_MASK
);
3694 crypto_aead_set_flags(aeadctx
->sw_cipher
, crypto_aead_get_flags(authenc
)
3695 & CRYPTO_TFM_REQ_MASK
);
3696 err
= crypto_aead_setkey(aeadctx
->sw_cipher
, key
, keylen
);
3700 if (crypto_authenc_extractkeys(&keys
, key
, keylen
) != 0)
3703 subtype
= get_aead_subtype(authenc
);
3704 if (subtype
== CRYPTO_ALG_SUB_TYPE_CTR_SHA
||
3705 subtype
== CRYPTO_ALG_SUB_TYPE_CTR_NULL
) {
3706 if (keys
.enckeylen
< CTR_RFC3686_NONCE_SIZE
)
3708 memcpy(aeadctx
->nonce
, keys
.enckey
+ (keys
.enckeylen
3709 - CTR_RFC3686_NONCE_SIZE
), CTR_RFC3686_NONCE_SIZE
);
3710 keys
.enckeylen
-= CTR_RFC3686_NONCE_SIZE
;
3712 if (keys
.enckeylen
== AES_KEYSIZE_128
) {
3713 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_128
;
3714 } else if (keys
.enckeylen
== AES_KEYSIZE_192
) {
3715 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_192
;
3716 } else if (keys
.enckeylen
== AES_KEYSIZE_256
) {
3717 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_256
;
3719 pr_err("Unsupported cipher key %d\n", keys
.enckeylen
);
3722 memcpy(aeadctx
->key
, keys
.enckey
, keys
.enckeylen
);
3723 aeadctx
->enckey_len
= keys
.enckeylen
;
3724 if (subtype
== CRYPTO_ALG_SUB_TYPE_CBC_SHA
||
3725 subtype
== CRYPTO_ALG_SUB_TYPE_CBC_NULL
) {
3726 get_aes_decrypt_key(actx
->dec_rrkey
, aeadctx
->key
,
3727 aeadctx
->enckey_len
<< 3);
3729 key_ctx_len
= sizeof(struct _key_ctx
) + roundup(keys
.enckeylen
, 16);
3731 aeadctx
->key_ctx_hdr
= FILL_KEY_CTX_HDR(ck_size
, CHCR_KEYCTX_NO_KEY
, 0,
3732 0, key_ctx_len
>> 4);
3733 actx
->auth_mode
= CHCR_SCMD_AUTH_MODE_NOP
;
3734 memzero_explicit(&keys
, sizeof(keys
));
3737 aeadctx
->enckey_len
= 0;
3738 memzero_explicit(&keys
, sizeof(keys
));
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct sk_buff *skb;
	struct chcr_dev *cdev;

	cdev = a_ctx(tfm)->dev;
	if (!cdev) {
		pr_err("%s : No crypto device.\n", __func__);
		return -ENXIO;
	}

	if (chcr_inc_wrcount(cdev)) {
	/* Detach state for CHCR means lldi or padap is freed.
	 * We cannot increment fallback here.
	 */
		return chcr_aead_fallback(req, reqctx->op);
	}

	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   reqctx->txqidx) &&
	    (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		chcr_dec_wrcount(cdev);
		return -ENOSPC;
	}

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
		pr_err("RFC4106: Invalid value of assoclen %d\n",
		       req->assoclen);
		return -EINVAL;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);

	if (IS_ERR_OR_NULL(skb)) {
		chcr_dec_wrcount(cdev);
		return PTR_ERR_OR_ZERO(skb);
	}

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
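/*
 * Return-value convention above follows the usual async crypto pattern:
 * -EINPROGRESS means the work request was posted and the result will arrive
 * through chcr_handle_resp(); -ENOSPC is returned when the crypto queue is
 * full and the caller did not allow backlogging; hard errors (no device,
 * bad assoclen) fail the request synchronously.
 */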
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	struct chcr_context *ctx = a_ctx(tfm);
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = a_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
	int size;
	unsigned int cpu;

	cpu = get_cpu();
	reqctx->txqidx = cpu % ctx->ntxq;
	reqctx->rxqidx = cpu % ctx->nrxq;
	put_cpu();

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}
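/*
 * Decrypt-side note: when the requested tag length is one the hardware
 * cannot verify directly (mayverify == VERIFY_SW), the full maxauthsize tag
 * is produced and the final comparison is done in software on completion;
 * the non-zero "size" passed to chcr_aead_op() above appears to reserve the
 * extra room for that full tag in the work request.
 */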
3859 static struct chcr_alg_template driver_algs
[] = {
3862 .type
= CRYPTO_ALG_TYPE_SKCIPHER
| CRYPTO_ALG_SUB_TYPE_CBC
,
3865 .base
.cra_name
= "cbc(aes)",
3866 .base
.cra_driver_name
= "cbc-aes-chcr",
3867 .base
.cra_blocksize
= AES_BLOCK_SIZE
,
3869 .init
= chcr_init_tfm
,
3870 .exit
= chcr_exit_tfm
,
3871 .min_keysize
= AES_MIN_KEY_SIZE
,
3872 .max_keysize
= AES_MAX_KEY_SIZE
,
3873 .ivsize
= AES_BLOCK_SIZE
,
3874 .setkey
= chcr_aes_cbc_setkey
,
3875 .encrypt
= chcr_aes_encrypt
,
3876 .decrypt
= chcr_aes_decrypt
,
3880 .type
= CRYPTO_ALG_TYPE_SKCIPHER
| CRYPTO_ALG_SUB_TYPE_XTS
,
3883 .base
.cra_name
= "xts(aes)",
3884 .base
.cra_driver_name
= "xts-aes-chcr",
3885 .base
.cra_blocksize
= AES_BLOCK_SIZE
,
3887 .init
= chcr_init_tfm
,
3888 .exit
= chcr_exit_tfm
,
3889 .min_keysize
= 2 * AES_MIN_KEY_SIZE
,
3890 .max_keysize
= 2 * AES_MAX_KEY_SIZE
,
3891 .ivsize
= AES_BLOCK_SIZE
,
3892 .setkey
= chcr_aes_xts_setkey
,
3893 .encrypt
= chcr_aes_encrypt
,
3894 .decrypt
= chcr_aes_decrypt
,
3898 .type
= CRYPTO_ALG_TYPE_SKCIPHER
| CRYPTO_ALG_SUB_TYPE_CTR
,
3901 .base
.cra_name
= "ctr(aes)",
3902 .base
.cra_driver_name
= "ctr-aes-chcr",
3903 .base
.cra_blocksize
= 1,
3905 .init
= chcr_init_tfm
,
3906 .exit
= chcr_exit_tfm
,
3907 .min_keysize
= AES_MIN_KEY_SIZE
,
3908 .max_keysize
= AES_MAX_KEY_SIZE
,
3909 .ivsize
= AES_BLOCK_SIZE
,
3910 .setkey
= chcr_aes_ctr_setkey
,
3911 .encrypt
= chcr_aes_encrypt
,
3912 .decrypt
= chcr_aes_decrypt
,
3916 .type
= CRYPTO_ALG_TYPE_SKCIPHER
|
3917 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686
,
3920 .base
.cra_name
= "rfc3686(ctr(aes))",
3921 .base
.cra_driver_name
= "rfc3686-ctr-aes-chcr",
3922 .base
.cra_blocksize
= 1,
3924 .init
= chcr_rfc3686_init
,
3925 .exit
= chcr_exit_tfm
,
3926 .min_keysize
= AES_MIN_KEY_SIZE
+ CTR_RFC3686_NONCE_SIZE
,
3927 .max_keysize
= AES_MAX_KEY_SIZE
+ CTR_RFC3686_NONCE_SIZE
,
3928 .ivsize
= CTR_RFC3686_IV_SIZE
,
3929 .setkey
= chcr_aes_rfc3686_setkey
,
3930 .encrypt
= chcr_aes_encrypt
,
3931 .decrypt
= chcr_aes_decrypt
,
3936 .type
= CRYPTO_ALG_TYPE_AHASH
,
3939 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
3942 .cra_driver_name
= "sha1-chcr",
3943 .cra_blocksize
= SHA1_BLOCK_SIZE
,
3948 .type
= CRYPTO_ALG_TYPE_AHASH
,
3951 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
3953 .cra_name
= "sha256",
3954 .cra_driver_name
= "sha256-chcr",
3955 .cra_blocksize
= SHA256_BLOCK_SIZE
,
3960 .type
= CRYPTO_ALG_TYPE_AHASH
,
3963 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
3965 .cra_name
= "sha224",
3966 .cra_driver_name
= "sha224-chcr",
3967 .cra_blocksize
= SHA224_BLOCK_SIZE
,
3972 .type
= CRYPTO_ALG_TYPE_AHASH
,
3975 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
3977 .cra_name
= "sha384",
3978 .cra_driver_name
= "sha384-chcr",
3979 .cra_blocksize
= SHA384_BLOCK_SIZE
,
3984 .type
= CRYPTO_ALG_TYPE_AHASH
,
3987 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
3989 .cra_name
= "sha512",
3990 .cra_driver_name
= "sha512-chcr",
3991 .cra_blocksize
= SHA512_BLOCK_SIZE
,
3997 .type
= CRYPTO_ALG_TYPE_HMAC
,
4000 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
4002 .cra_name
= "hmac(sha1)",
4003 .cra_driver_name
= "hmac-sha1-chcr",
4004 .cra_blocksize
= SHA1_BLOCK_SIZE
,
4009 .type
= CRYPTO_ALG_TYPE_HMAC
,
4012 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
4014 .cra_name
= "hmac(sha224)",
4015 .cra_driver_name
= "hmac-sha224-chcr",
4016 .cra_blocksize
= SHA224_BLOCK_SIZE
,
4021 .type
= CRYPTO_ALG_TYPE_HMAC
,
4024 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
4026 .cra_name
= "hmac(sha256)",
4027 .cra_driver_name
= "hmac-sha256-chcr",
4028 .cra_blocksize
= SHA256_BLOCK_SIZE
,
4033 .type
= CRYPTO_ALG_TYPE_HMAC
,
4036 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
4038 .cra_name
= "hmac(sha384)",
4039 .cra_driver_name
= "hmac-sha384-chcr",
4040 .cra_blocksize
= SHA384_BLOCK_SIZE
,
4045 .type
= CRYPTO_ALG_TYPE_HMAC
,
4048 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
4050 .cra_name
= "hmac(sha512)",
4051 .cra_driver_name
= "hmac-sha512-chcr",
4052 .cra_blocksize
= SHA512_BLOCK_SIZE
,
4056 /* Add AEAD Algorithms */
4058 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_GCM
,
4062 .cra_name
= "gcm(aes)",
4063 .cra_driver_name
= "gcm-aes-chcr",
4065 .cra_priority
= CHCR_AEAD_PRIORITY
,
4066 .cra_ctxsize
= sizeof(struct chcr_context
) +
4067 sizeof(struct chcr_aead_ctx
) +
4068 sizeof(struct chcr_gcm_ctx
),
4070 .ivsize
= GCM_AES_IV_SIZE
,
4071 .maxauthsize
= GHASH_DIGEST_SIZE
,
4072 .setkey
= chcr_gcm_setkey
,
4073 .setauthsize
= chcr_gcm_setauthsize
,
4077 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
,
4081 .cra_name
= "rfc4106(gcm(aes))",
4082 .cra_driver_name
= "rfc4106-gcm-aes-chcr",
4084 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
4085 .cra_ctxsize
= sizeof(struct chcr_context
) +
4086 sizeof(struct chcr_aead_ctx
) +
4087 sizeof(struct chcr_gcm_ctx
),
4090 .ivsize
= GCM_RFC4106_IV_SIZE
,
4091 .maxauthsize
= GHASH_DIGEST_SIZE
,
4092 .setkey
= chcr_gcm_setkey
,
4093 .setauthsize
= chcr_4106_4309_setauthsize
,
4097 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_CCM
,
4101 .cra_name
= "ccm(aes)",
4102 .cra_driver_name
= "ccm-aes-chcr",
4104 .cra_priority
= CHCR_AEAD_PRIORITY
,
4105 .cra_ctxsize
= sizeof(struct chcr_context
) +
4106 sizeof(struct chcr_aead_ctx
),
4109 .ivsize
= AES_BLOCK_SIZE
,
4110 .maxauthsize
= GHASH_DIGEST_SIZE
,
4111 .setkey
= chcr_aead_ccm_setkey
,
4112 .setauthsize
= chcr_ccm_setauthsize
,
4116 .type
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309
,
4120 .cra_name
= "rfc4309(ccm(aes))",
4121 .cra_driver_name
= "rfc4309-ccm-aes-chcr",
4123 .cra_priority
= CHCR_AEAD_PRIORITY
+ 1,
4124 .cra_ctxsize
= sizeof(struct chcr_context
) +
4125 sizeof(struct chcr_aead_ctx
),
4129 .maxauthsize
= GHASH_DIGEST_SIZE
,
4130 .setkey
= chcr_aead_rfc4309_setkey
,
4131 .setauthsize
= chcr_4106_4309_setauthsize
,
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
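
/*
 * Usage sketch: once an entry above is registered, any kernel consumer can
 * allocate it through the generic crypto API by cra_name; the core selects
 * the chcr implementation when its cra_priority wins.  Illustrative only:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		// resolves to "gcm-aes-chcr" if it is the highest-priority
 *		// gcm(aes) provider currently registered
 *		crypto_free_aead(tfm);
 *	}
 */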
/*
 *	chcr_unregister_alg - Deregister crypto algorithms with
 *	kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
				== 1) {
				crypto_unregister_skcipher(
						&driver_algs[i].alg.skcipher);
				driver_algs[i].is_registered = 0;
			}
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
				driver_algs[i].is_registered = 0;
			}
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered && refcount_read(
			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
				== 1) {
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
				driver_algs[i].is_registered = 0;
			}
			break;
		}
	}
	return 0;
}
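
/*
 * Note on the refcount check above: cra_refcnt is expected to be 1 when the
 * only remaining reference to an algorithm is its registration, i.e. no
 * transforms are currently allocated against it.  Algorithms with live tfms
 * are left registered, which is why is_registered is cleared only on the
 * paths that actually unregister.
 */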
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
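
/*
 * SZ_AHASH_CTX sizes the tfm context for the plain SHA transforms,
 * SZ_AHASH_H_CTX additionally reserves room for struct hmac_ctx used by the
 * HMAC variants, and SZ_AHASH_REQ_CTX is used as the export/import
 * statesize in chcr_register_alg() below.
 */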
/*
 *	chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			driver_algs[i].alg.skcipher.base.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
			driver_algs[i].alg.skcipher.base.cra_flags =
				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.skcipher.base.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;

			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ALLOCATES_MEMORY;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("%s : Algorithm registration failed\n", name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
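
/*
 * The flag combination set above follows the usual kernel-crypto
 * conventions: CRYPTO_ALG_ASYNC because completions arrive from the
 * adapter, CRYPTO_ALG_ALLOCATES_MEMORY because the request path may
 * allocate, and CRYPTO_ALG_NEED_FALLBACK because requests the hardware
 * cannot service are handed off to a software implementation.
 */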
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After this
 *	the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
 *	This should be called once when the last device goes down. After this
 *	the kernel will not call the driver API for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}
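
/*
 * Illustrative caller (the dev_count counter below is hypothetical): the
 * chcr core is expected to invoke start_crypto() when the first adapter
 * appears and stop_crypto() when the last one goes away, e.g.:
 *
 *	static atomic_t dev_count = ATOMIC_INIT(0);
 *
 *	if (atomic_inc_return(&dev_count) == 1)
 *		start_crypto();		// first adapter: expose algorithms
 *	...
 *	if (atomic_dec_return(&dev_count) == 0)
 *		stop_crypto();		// last adapter gone: withdraw them
 */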