2 * This file is part of the Chelsio T6 Crypto driver for Linux.
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * Written and Maintained by:
35 * Manoj Malviya (manojmalviya@chelsio.com)
36 * Atul Gupta (atul.gupta@chelsio.com)
37 * Jitendra Lulla (jlulla@chelsio.com)
38 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39 * Harsh Jain (harsh@chelsio.com)
42 #define pr_fmt(fmt) "chcr:" fmt
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/sha.h>
57 #include <crypto/authenc.h>
58 #include <crypto/ctr.h>
59 #include <crypto/gf128mul.h>
60 #include <crypto/internal/aead.h>
61 #include <crypto/null.h>
62 #include <crypto/internal/skcipher.h>
63 #include <crypto/aead.h>
64 #include <crypto/scatterwalk.h>
65 #include <crypto/internal/hash.h>
69 #include "chcr_core.h"
70 #include "chcr_algo.h"
71 #include "chcr_crypto.h"
73 static inline struct chcr_aead_ctx
*AEAD_CTX(struct chcr_context
*ctx
)
75 return ctx
->crypto_ctx
->aeadctx
;
78 static inline struct ablk_ctx
*ABLK_CTX(struct chcr_context
*ctx
)
80 return ctx
->crypto_ctx
->ablkctx
;
83 static inline struct hmac_ctx
*HMAC_CTX(struct chcr_context
*ctx
)
85 return ctx
->crypto_ctx
->hmacctx
;
88 static inline struct chcr_gcm_ctx
*GCM_CTX(struct chcr_aead_ctx
*gctx
)
90 return gctx
->ctx
->gcm
;
93 static inline struct chcr_authenc_ctx
*AUTHENC_CTX(struct chcr_aead_ctx
*gctx
)
95 return gctx
->ctx
->authenc
;
98 static inline struct uld_ctx
*ULD_CTX(struct chcr_context
*ctx
)
100 return ctx
->dev
->u_ctx
;
103 static inline int is_ofld_imm(const struct sk_buff
*skb
)
105 return (skb
->len
<= CRYPTO_MAX_IMM_TX_PKT_LEN
);
109 * sgl_len - calculates the size of an SGL of the given capacity
110 * @n: the number of SGL entries
111 * Calculates the number of flits needed for a scatter/gather list that
112 * can hold the given number of entries.
114 static inline unsigned int sgl_len(unsigned int n
)
117 return (3 * n
) / 2 + (n
& 1) + 2;
120 static void chcr_verify_tag(struct aead_request
*req
, u8
*input
, int *err
)
122 u8 temp
[SHA512_DIGEST_SIZE
];
123 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
124 int authsize
= crypto_aead_authsize(tfm
);
125 struct cpl_fw6_pld
*fw6_pld
;
128 fw6_pld
= (struct cpl_fw6_pld
*)input
;
129 if ((get_aead_subtype(tfm
) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106
) ||
130 (get_aead_subtype(tfm
) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM
)) {
131 cmp
= crypto_memneq(&fw6_pld
->data
[2], (fw6_pld
+ 1), authsize
);
134 sg_pcopy_to_buffer(req
->src
, sg_nents(req
->src
), temp
,
135 authsize
, req
->assoclen
+
136 req
->cryptlen
- authsize
);
137 cmp
= crypto_memneq(temp
, (fw6_pld
+ 1), authsize
);
146 * chcr_handle_resp - Unmap the DMA buffers associated with the request
147 * @req: crypto request
149 int chcr_handle_resp(struct crypto_async_request
*req
, unsigned char *input
,
152 struct crypto_tfm
*tfm
= req
->tfm
;
153 struct chcr_context
*ctx
= crypto_tfm_ctx(tfm
);
154 struct uld_ctx
*u_ctx
= ULD_CTX(ctx
);
155 struct chcr_req_ctx ctx_req
;
156 unsigned int digestsize
, updated_digestsize
;
157 struct adapter
*adap
= padap(ctx
->dev
);
159 switch (tfm
->__crt_alg
->cra_flags
& CRYPTO_ALG_TYPE_MASK
) {
160 case CRYPTO_ALG_TYPE_AEAD
:
161 ctx_req
.req
.aead_req
= aead_request_cast(req
);
162 ctx_req
.ctx
.reqctx
= aead_request_ctx(ctx_req
.req
.aead_req
);
163 dma_unmap_sg(&u_ctx
->lldi
.pdev
->dev
, ctx_req
.ctx
.reqctx
->dst
,
164 ctx_req
.ctx
.reqctx
->dst_nents
, DMA_FROM_DEVICE
);
165 if (ctx_req
.ctx
.reqctx
->skb
) {
166 kfree_skb(ctx_req
.ctx
.reqctx
->skb
);
167 ctx_req
.ctx
.reqctx
->skb
= NULL
;
169 free_new_sg(ctx_req
.ctx
.reqctx
->newdstsg
);
170 ctx_req
.ctx
.reqctx
->newdstsg
= NULL
;
171 if (ctx_req
.ctx
.reqctx
->verify
== VERIFY_SW
) {
172 chcr_verify_tag(ctx_req
.req
.aead_req
, input
,
174 ctx_req
.ctx
.reqctx
->verify
= VERIFY_HW
;
176 ctx_req
.req
.aead_req
->base
.complete(req
, err
);
179 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
180 err
= chcr_handle_cipher_resp(ablkcipher_request_cast(req
),
184 case CRYPTO_ALG_TYPE_AHASH
:
185 ctx_req
.req
.ahash_req
= ahash_request_cast(req
);
186 ctx_req
.ctx
.ahash_ctx
=
187 ahash_request_ctx(ctx_req
.req
.ahash_req
);
189 crypto_ahash_digestsize(crypto_ahash_reqtfm(
190 ctx_req
.req
.ahash_req
));
191 updated_digestsize
= digestsize
;
192 if (digestsize
== SHA224_DIGEST_SIZE
)
193 updated_digestsize
= SHA256_DIGEST_SIZE
;
194 else if (digestsize
== SHA384_DIGEST_SIZE
)
195 updated_digestsize
= SHA512_DIGEST_SIZE
;
196 if (ctx_req
.ctx
.ahash_ctx
->skb
) {
197 kfree_skb(ctx_req
.ctx
.ahash_ctx
->skb
);
198 ctx_req
.ctx
.ahash_ctx
->skb
= NULL
;
200 if (ctx_req
.ctx
.ahash_ctx
->result
== 1) {
201 ctx_req
.ctx
.ahash_ctx
->result
= 0;
202 memcpy(ctx_req
.req
.ahash_req
->result
, input
+
203 sizeof(struct cpl_fw6_pld
),
206 memcpy(ctx_req
.ctx
.ahash_ctx
->partial_hash
, input
+
207 sizeof(struct cpl_fw6_pld
),
210 ctx_req
.req
.ahash_req
->base
.complete(req
, err
);
213 atomic_inc(&adap
->chcr_stats
.complete
);
218 * calc_tx_flits_ofld - calculate # of flits for an offload packet
220 * Returns the number of flits needed for the given offload packet.
221 * These packets are already fully constructed and no additional headers
224 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff
*skb
)
226 unsigned int flits
, cnt
;
228 if (is_ofld_imm(skb
))
229 return DIV_ROUND_UP(skb
->len
, 8);
231 flits
= skb_transport_offset(skb
) / 8; /* headers */
232 cnt
= skb_shinfo(skb
)->nr_frags
;
233 if (skb_tail_pointer(skb
) != skb_transport_header(skb
))
235 return flits
+ sgl_len(cnt
);
238 static inline void get_aes_decrypt_key(unsigned char *dec_key
,
239 const unsigned char *key
,
240 unsigned int keylength
)
248 case AES_KEYLENGTH_128BIT
:
249 nk
= KEYLENGTH_4BYTES
;
250 nr
= NUMBER_OF_ROUNDS_10
;
252 case AES_KEYLENGTH_192BIT
:
253 nk
= KEYLENGTH_6BYTES
;
254 nr
= NUMBER_OF_ROUNDS_12
;
256 case AES_KEYLENGTH_256BIT
:
257 nk
= KEYLENGTH_8BYTES
;
258 nr
= NUMBER_OF_ROUNDS_14
;
263 for (i
= 0; i
< nk
; i
++)
264 w_ring
[i
] = be32_to_cpu(*(u32
*)&key
[4 * i
]);
267 temp
= w_ring
[nk
- 1];
268 while (i
+ nk
< (nr
+ 1) * 4) {
271 temp
= (temp
<< 8) | (temp
>> 24);
272 temp
= aes_ks_subword(temp
);
273 temp
^= round_constant
[i
/ nk
];
274 } else if (nk
== 8 && (i
% 4 == 0)) {
275 temp
= aes_ks_subword(temp
);
277 w_ring
[i
% nk
] ^= temp
;
278 temp
= w_ring
[i
% nk
];
282 for (k
= 0, j
= i
% nk
; k
< nk
; k
++) {
283 *((u32
*)dec_key
+ k
) = htonl(w_ring
[j
]);
290 static struct crypto_shash
*chcr_alloc_shash(unsigned int ds
)
292 struct crypto_shash
*base_hash
= ERR_PTR(-EINVAL
);
295 case SHA1_DIGEST_SIZE
:
296 base_hash
= crypto_alloc_shash("sha1", 0, 0);
298 case SHA224_DIGEST_SIZE
:
299 base_hash
= crypto_alloc_shash("sha224", 0, 0);
301 case SHA256_DIGEST_SIZE
:
302 base_hash
= crypto_alloc_shash("sha256", 0, 0);
304 case SHA384_DIGEST_SIZE
:
305 base_hash
= crypto_alloc_shash("sha384", 0, 0);
307 case SHA512_DIGEST_SIZE
:
308 base_hash
= crypto_alloc_shash("sha512", 0, 0);
315 static int chcr_compute_partial_hash(struct shash_desc
*desc
,
316 char *iopad
, char *result_hash
,
319 struct sha1_state sha1_st
;
320 struct sha256_state sha256_st
;
321 struct sha512_state sha512_st
;
324 if (digest_size
== SHA1_DIGEST_SIZE
) {
325 error
= crypto_shash_init(desc
) ?:
326 crypto_shash_update(desc
, iopad
, SHA1_BLOCK_SIZE
) ?:
327 crypto_shash_export(desc
, (void *)&sha1_st
);
328 memcpy(result_hash
, sha1_st
.state
, SHA1_DIGEST_SIZE
);
329 } else if (digest_size
== SHA224_DIGEST_SIZE
) {
330 error
= crypto_shash_init(desc
) ?:
331 crypto_shash_update(desc
, iopad
, SHA256_BLOCK_SIZE
) ?:
332 crypto_shash_export(desc
, (void *)&sha256_st
);
333 memcpy(result_hash
, sha256_st
.state
, SHA256_DIGEST_SIZE
);
335 } else if (digest_size
== SHA256_DIGEST_SIZE
) {
336 error
= crypto_shash_init(desc
) ?:
337 crypto_shash_update(desc
, iopad
, SHA256_BLOCK_SIZE
) ?:
338 crypto_shash_export(desc
, (void *)&sha256_st
);
339 memcpy(result_hash
, sha256_st
.state
, SHA256_DIGEST_SIZE
);
341 } else if (digest_size
== SHA384_DIGEST_SIZE
) {
342 error
= crypto_shash_init(desc
) ?:
343 crypto_shash_update(desc
, iopad
, SHA512_BLOCK_SIZE
) ?:
344 crypto_shash_export(desc
, (void *)&sha512_st
);
345 memcpy(result_hash
, sha512_st
.state
, SHA512_DIGEST_SIZE
);
347 } else if (digest_size
== SHA512_DIGEST_SIZE
) {
348 error
= crypto_shash_init(desc
) ?:
349 crypto_shash_update(desc
, iopad
, SHA512_BLOCK_SIZE
) ?:
350 crypto_shash_export(desc
, (void *)&sha512_st
);
351 memcpy(result_hash
, sha512_st
.state
, SHA512_DIGEST_SIZE
);
354 pr_err("Unknown digest size %d\n", digest_size
);
359 static void chcr_change_order(char *buf
, int ds
)
363 if (ds
== SHA512_DIGEST_SIZE
) {
364 for (i
= 0; i
< (ds
/ sizeof(u64
)); i
++)
365 *((__be64
*)buf
+ i
) =
366 cpu_to_be64(*((u64
*)buf
+ i
));
368 for (i
= 0; i
< (ds
/ sizeof(u32
)); i
++)
369 *((__be32
*)buf
+ i
) =
370 cpu_to_be32(*((u32
*)buf
+ i
));
374 static inline int is_hmac(struct crypto_tfm
*tfm
)
376 struct crypto_alg
*alg
= tfm
->__crt_alg
;
377 struct chcr_alg_template
*chcr_crypto_alg
=
378 container_of(__crypto_ahash_alg(alg
), struct chcr_alg_template
,
380 if (chcr_crypto_alg
->type
== CRYPTO_ALG_TYPE_HMAC
)
385 static void write_phys_cpl(struct cpl_rx_phys_dsgl
*phys_cpl
,
386 struct scatterlist
*sg
,
387 struct phys_sge_parm
*sg_param
)
389 struct phys_sge_pairs
*to
;
390 unsigned int len
= 0, left_size
= sg_param
->obsize
;
391 unsigned int nents
= sg_param
->nents
, i
, j
= 0;
393 phys_cpl
->op_to_tid
= htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL
)
394 | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
395 phys_cpl
->pcirlxorder_to_noofsgentr
=
396 htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
397 CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
398 CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
399 CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
400 CPL_RX_PHYS_DSGL_DCAID_V(0) |
401 CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents
));
402 phys_cpl
->rss_hdr_int
.opcode
= CPL_RX_PHYS_ADDR
;
403 phys_cpl
->rss_hdr_int
.qid
= htons(sg_param
->qid
);
404 phys_cpl
->rss_hdr_int
.hash_val
= 0;
405 to
= (struct phys_sge_pairs
*)((unsigned char *)phys_cpl
+
406 sizeof(struct cpl_rx_phys_dsgl
));
407 for (i
= 0; nents
&& left_size
; to
++) {
408 for (j
= 0; j
< 8 && nents
&& left_size
; j
++, nents
--) {
409 len
= min(left_size
, sg_dma_len(sg
));
410 to
->len
[j
] = htons(len
);
411 to
->addr
[j
] = cpu_to_be64(sg_dma_address(sg
));
418 static inline int map_writesg_phys_cpl(struct device
*dev
,
419 struct cpl_rx_phys_dsgl
*phys_cpl
,
420 struct scatterlist
*sg
,
421 struct phys_sge_parm
*sg_param
)
423 if (!sg
|| !sg_param
->nents
)
426 sg_param
->nents
= dma_map_sg(dev
, sg
, sg_param
->nents
, DMA_FROM_DEVICE
);
427 if (sg_param
->nents
== 0) {
428 pr_err("CHCR : DMA mapping failed\n");
431 write_phys_cpl(phys_cpl
, sg
, sg_param
);
435 static inline int get_aead_subtype(struct crypto_aead
*aead
)
437 struct aead_alg
*alg
= crypto_aead_alg(aead
);
438 struct chcr_alg_template
*chcr_crypto_alg
=
439 container_of(alg
, struct chcr_alg_template
, alg
.aead
);
440 return chcr_crypto_alg
->type
& CRYPTO_ALG_SUB_TYPE_MASK
;
443 static inline int get_cryptoalg_subtype(struct crypto_tfm
*tfm
)
445 struct crypto_alg
*alg
= tfm
->__crt_alg
;
446 struct chcr_alg_template
*chcr_crypto_alg
=
447 container_of(alg
, struct chcr_alg_template
, alg
.crypto
);
449 return chcr_crypto_alg
->type
& CRYPTO_ALG_SUB_TYPE_MASK
;
452 static inline void write_buffer_to_skb(struct sk_buff
*skb
,
458 skb
->data_len
+= bfr_len
;
459 skb
->truesize
+= bfr_len
;
460 get_page(virt_to_page(bfr
));
461 skb_fill_page_desc(skb
, *frags
, virt_to_page(bfr
),
462 offset_in_page(bfr
), bfr_len
);
468 write_sg_to_skb(struct sk_buff
*skb
, unsigned int *frags
,
469 struct scatterlist
*sg
, unsigned int count
)
472 unsigned int page_len
;
475 skb
->data_len
+= count
;
476 skb
->truesize
+= count
;
479 if (!sg
|| (!(sg
->length
)))
483 page_len
= min(sg
->length
, count
);
484 skb_fill_page_desc(skb
, *frags
, spage
, sg
->offset
, page_len
);
491 static int cxgb4_is_crypto_q_full(struct net_device
*dev
, unsigned int idx
)
493 struct adapter
*adap
= netdev2adap(dev
);
494 struct sge_uld_txq_info
*txq_info
=
495 adap
->sge
.uld_txq_info
[CXGB4_TX_CRYPTO
];
496 struct sge_uld_txq
*txq
;
500 txq
= &txq_info
->uldtxq
[idx
];
501 spin_lock(&txq
->sendq
.lock
);
504 spin_unlock(&txq
->sendq
.lock
);
509 static int generate_copy_rrkey(struct ablk_ctx
*ablkctx
,
510 struct _key_ctx
*key_ctx
)
512 if (ablkctx
->ciph_mode
== CHCR_SCMD_CIPHER_MODE_AES_CBC
) {
513 memcpy(key_ctx
->key
, ablkctx
->rrkey
, ablkctx
->enckey_len
);
516 ablkctx
->key
+ (ablkctx
->enckey_len
>> 1),
517 ablkctx
->enckey_len
>> 1);
518 memcpy(key_ctx
->key
+ (ablkctx
->enckey_len
>> 1),
519 ablkctx
->rrkey
, ablkctx
->enckey_len
>> 1);
523 static int chcr_sg_ent_in_wr(struct scatterlist
*src
,
524 struct scatterlist
*dst
,
530 int srclen
= 0, dstlen
= 0;
531 int srcsg
= minsg
, dstsg
= 0;
535 while (src
&& dst
&& ((srcsg
+ 1) <= MAX_SKB_FRAGS
) &&
536 space
> (sgl_ent_len
[srcsg
+ 1] + dsgl_ent_len
[dstsg
])) {
537 srclen
+= src
->length
;
539 while (dst
&& ((dstsg
+ 1) <= MAX_DSGL_ENT
) &&
540 space
> (sgl_ent_len
[srcsg
] + dsgl_ent_len
[dstsg
+ 1])) {
541 if (srclen
<= dstlen
)
543 dstlen
+= dst
->length
;
549 *sent
= srcsg
- minsg
;
551 return min(srclen
, dstlen
);
554 static int chcr_cipher_fallback(struct crypto_skcipher
*cipher
,
556 struct scatterlist
*src
,
557 struct scatterlist
*dst
,
560 unsigned short op_type
)
564 SKCIPHER_REQUEST_ON_STACK(subreq
, cipher
);
565 skcipher_request_set_tfm(subreq
, cipher
);
566 skcipher_request_set_callback(subreq
, flags
, NULL
, NULL
);
567 skcipher_request_set_crypt(subreq
, src
, dst
,
570 err
= op_type
? crypto_skcipher_decrypt(subreq
) :
571 crypto_skcipher_encrypt(subreq
);
572 skcipher_request_zero(subreq
);
577 static inline void create_wreq(struct chcr_context
*ctx
,
578 struct chcr_wr
*chcr_req
,
579 void *req
, struct sk_buff
*skb
,
580 int kctx_len
, int hash_sz
,
585 struct uld_ctx
*u_ctx
= ULD_CTX(ctx
);
586 int iv_loc
= IV_DSGL
;
587 int qid
= u_ctx
->lldi
.rxq_ids
[ctx
->rx_qidx
];
588 unsigned int immdatalen
= 0, nr_frags
= 0;
590 if (is_ofld_imm(skb
)) {
591 immdatalen
= skb
->data_len
;
592 iv_loc
= IV_IMMEDIATE
;
594 nr_frags
= skb_shinfo(skb
)->nr_frags
;
597 chcr_req
->wreq
.op_to_cctx_size
= FILL_WR_OP_CCTX_SIZE(immdatalen
,
598 ((sizeof(chcr_req
->key_ctx
) + kctx_len
) >> 4));
599 chcr_req
->wreq
.pld_size_hash_size
=
600 htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths
[nr_frags
]) |
601 FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz
));
602 chcr_req
->wreq
.len16_pkd
=
603 htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
604 (calc_tx_flits_ofld(skb
) * 8), 16)));
605 chcr_req
->wreq
.cookie
= cpu_to_be64((uintptr_t)req
);
606 chcr_req
->wreq
.rx_chid_to_rx_q_id
=
607 FILL_WR_RX_Q_ID(ctx
->dev
->rx_channel_id
, qid
,
608 is_iv
? iv_loc
: IV_NOP
, !!lcb
,
611 chcr_req
->ulptx
.cmd_dest
= FILL_ULPTX_CMD_DEST(ctx
->dev
->tx_channel_id
,
613 chcr_req
->ulptx
.len
= htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb
) * 8),
614 16) - ((sizeof(chcr_req
->wreq
)) >> 4)));
616 chcr_req
->sc_imm
.cmd_more
= FILL_CMD_MORE(immdatalen
);
617 chcr_req
->sc_imm
.len
= cpu_to_be32(sizeof(struct cpl_tx_sec_pdu
) +
618 sizeof(chcr_req
->key_ctx
) +
619 kctx_len
+ sc_len
+ immdatalen
);
623 * create_cipher_wr - form the WR for cipher operations
625 * @ctx: crypto driver context of the request.
626 * @qid: ingress qid where response of this WR should be received.
627 * @op_type: encryption or decryption
629 static struct sk_buff
*create_cipher_wr(struct cipher_wr_param
*wrparam
)
631 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(wrparam
->req
);
632 struct chcr_context
*ctx
= crypto_ablkcipher_ctx(tfm
);
633 struct uld_ctx
*u_ctx
= ULD_CTX(ctx
);
634 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
635 struct sk_buff
*skb
= NULL
;
636 struct chcr_wr
*chcr_req
;
637 struct cpl_rx_phys_dsgl
*phys_cpl
;
638 struct chcr_blkcipher_req_ctx
*reqctx
=
639 ablkcipher_request_ctx(wrparam
->req
);
640 struct phys_sge_parm sg_param
;
641 unsigned int frags
= 0, transhdr_len
, phys_dsgl
;
643 unsigned int ivsize
= AES_BLOCK_SIZE
, kctx_len
;
644 gfp_t flags
= wrparam
->req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
?
645 GFP_KERNEL
: GFP_ATOMIC
;
646 struct adapter
*adap
= padap(ctx
->dev
);
648 phys_dsgl
= get_space_for_phys_dsgl(reqctx
->dst_nents
);
650 kctx_len
= (DIV_ROUND_UP(ablkctx
->enckey_len
, 16) * 16);
651 transhdr_len
= CIPHER_TRANSHDR_SIZE(kctx_len
, phys_dsgl
);
652 skb
= alloc_skb((transhdr_len
+ sizeof(struct sge_opaque_hdr
)), flags
);
657 skb_reserve(skb
, sizeof(struct sge_opaque_hdr
));
658 chcr_req
= __skb_put_zero(skb
, transhdr_len
);
659 chcr_req
->sec_cpl
.op_ivinsrtofst
=
660 FILL_SEC_CPL_OP_IVINSR(ctx
->dev
->rx_channel_id
, 2, 1);
662 chcr_req
->sec_cpl
.pldlen
= htonl(ivsize
+ wrparam
->bytes
);
663 chcr_req
->sec_cpl
.aadstart_cipherstop_hi
=
664 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize
+ 1, 0);
666 chcr_req
->sec_cpl
.cipherstop_lo_authinsert
=
667 FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
668 chcr_req
->sec_cpl
.seqno_numivs
= FILL_SEC_CPL_SCMD0_SEQNO(reqctx
->op
, 0,
671 chcr_req
->sec_cpl
.ivgen_hdrlen
= FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
674 chcr_req
->key_ctx
.ctx_hdr
= ablkctx
->key_ctx_hdr
;
675 if ((reqctx
->op
== CHCR_DECRYPT_OP
) &&
676 (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm
)) ==
677 CRYPTO_ALG_SUB_TYPE_CTR
)) &&
678 (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm
)) ==
679 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686
))) {
680 generate_copy_rrkey(ablkctx
, &chcr_req
->key_ctx
);
682 if ((ablkctx
->ciph_mode
== CHCR_SCMD_CIPHER_MODE_AES_CBC
) ||
683 (ablkctx
->ciph_mode
== CHCR_SCMD_CIPHER_MODE_AES_CTR
)) {
684 memcpy(chcr_req
->key_ctx
.key
, ablkctx
->key
,
685 ablkctx
->enckey_len
);
687 memcpy(chcr_req
->key_ctx
.key
, ablkctx
->key
+
688 (ablkctx
->enckey_len
>> 1),
689 ablkctx
->enckey_len
>> 1);
690 memcpy(chcr_req
->key_ctx
.key
+
691 (ablkctx
->enckey_len
>> 1),
693 ablkctx
->enckey_len
>> 1);
696 phys_cpl
= (struct cpl_rx_phys_dsgl
*)((u8
*)(chcr_req
+ 1) + kctx_len
);
697 sg_param
.nents
= reqctx
->dst_nents
;
698 sg_param
.obsize
= wrparam
->bytes
;
699 sg_param
.qid
= wrparam
->qid
;
700 error
= map_writesg_phys_cpl(&u_ctx
->lldi
.pdev
->dev
, phys_cpl
,
701 reqctx
->dst
, &sg_param
);
705 skb_set_transport_header(skb
, transhdr_len
);
706 write_buffer_to_skb(skb
, &frags
, reqctx
->iv
, ivsize
);
707 write_sg_to_skb(skb
, &frags
, wrparam
->srcsg
, wrparam
->bytes
);
708 atomic_inc(&adap
->chcr_stats
.cipher_rqst
);
709 create_wreq(ctx
, chcr_req
, &(wrparam
->req
->base
), skb
, kctx_len
, 0, 1,
710 sizeof(struct cpl_rx_phys_dsgl
) + phys_dsgl
,
711 ablkctx
->ciph_mode
== CHCR_SCMD_CIPHER_MODE_AES_CBC
);
718 return ERR_PTR(error
);
721 static inline int chcr_keyctx_ck_size(unsigned int keylen
)
725 if (keylen
== AES_KEYSIZE_128
)
726 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_128
;
727 else if (keylen
== AES_KEYSIZE_192
)
728 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_192
;
729 else if (keylen
== AES_KEYSIZE_256
)
730 ck_size
= CHCR_KEYCTX_CIPHER_KEY_SIZE_256
;
736 static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher
*cipher
,
740 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(cipher
);
741 struct chcr_context
*ctx
= crypto_ablkcipher_ctx(cipher
);
742 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
745 crypto_skcipher_clear_flags(ablkctx
->sw_cipher
, CRYPTO_TFM_REQ_MASK
);
746 crypto_skcipher_set_flags(ablkctx
->sw_cipher
, cipher
->base
.crt_flags
&
747 CRYPTO_TFM_REQ_MASK
);
748 err
= crypto_skcipher_setkey(ablkctx
->sw_cipher
, key
, keylen
);
749 tfm
->crt_flags
&= ~CRYPTO_TFM_RES_MASK
;
751 crypto_skcipher_get_flags(ablkctx
->sw_cipher
) &
756 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher
*cipher
,
760 struct chcr_context
*ctx
= crypto_ablkcipher_ctx(cipher
);
761 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
762 unsigned int ck_size
, context_size
;
766 err
= chcr_cipher_fallback_setkey(cipher
, key
, keylen
);
770 ck_size
= chcr_keyctx_ck_size(keylen
);
771 alignment
= ck_size
== CHCR_KEYCTX_CIPHER_KEY_SIZE_192
? 8 : 0;
772 memcpy(ablkctx
->key
, key
, keylen
);
773 ablkctx
->enckey_len
= keylen
;
774 get_aes_decrypt_key(ablkctx
->rrkey
, ablkctx
->key
, keylen
<< 3);
775 context_size
= (KEY_CONTEXT_HDR_SALT_AND_PAD
+
776 keylen
+ alignment
) >> 4;
778 ablkctx
->key_ctx_hdr
= FILL_KEY_CTX_HDR(ck_size
, CHCR_KEYCTX_NO_KEY
,
780 ablkctx
->ciph_mode
= CHCR_SCMD_CIPHER_MODE_AES_CBC
;
783 crypto_ablkcipher_set_flags(cipher
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
784 ablkctx
->enckey_len
= 0;
789 static int chcr_aes_ctr_setkey(struct crypto_ablkcipher
*cipher
,
793 struct chcr_context
*ctx
= crypto_ablkcipher_ctx(cipher
);
794 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
795 unsigned int ck_size
, context_size
;
799 err
= chcr_cipher_fallback_setkey(cipher
, key
, keylen
);
802 ck_size
= chcr_keyctx_ck_size(keylen
);
803 alignment
= (ck_size
== CHCR_KEYCTX_CIPHER_KEY_SIZE_192
) ? 8 : 0;
804 memcpy(ablkctx
->key
, key
, keylen
);
805 ablkctx
->enckey_len
= keylen
;
806 context_size
= (KEY_CONTEXT_HDR_SALT_AND_PAD
+
807 keylen
+ alignment
) >> 4;
809 ablkctx
->key_ctx_hdr
= FILL_KEY_CTX_HDR(ck_size
, CHCR_KEYCTX_NO_KEY
,
811 ablkctx
->ciph_mode
= CHCR_SCMD_CIPHER_MODE_AES_CTR
;
815 crypto_ablkcipher_set_flags(cipher
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
816 ablkctx
->enckey_len
= 0;
821 static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher
*cipher
,
825 struct chcr_context
*ctx
= crypto_ablkcipher_ctx(cipher
);
826 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
827 unsigned int ck_size
, context_size
;
831 if (keylen
< CTR_RFC3686_NONCE_SIZE
)
833 memcpy(ablkctx
->nonce
, key
+ (keylen
- CTR_RFC3686_NONCE_SIZE
),
834 CTR_RFC3686_NONCE_SIZE
);
836 keylen
-= CTR_RFC3686_NONCE_SIZE
;
837 err
= chcr_cipher_fallback_setkey(cipher
, key
, keylen
);
841 ck_size
= chcr_keyctx_ck_size(keylen
);
842 alignment
= (ck_size
== CHCR_KEYCTX_CIPHER_KEY_SIZE_192
) ? 8 : 0;
843 memcpy(ablkctx
->key
, key
, keylen
);
844 ablkctx
->enckey_len
= keylen
;
845 context_size
= (KEY_CONTEXT_HDR_SALT_AND_PAD
+
846 keylen
+ alignment
) >> 4;
848 ablkctx
->key_ctx_hdr
= FILL_KEY_CTX_HDR(ck_size
, CHCR_KEYCTX_NO_KEY
,
850 ablkctx
->ciph_mode
= CHCR_SCMD_CIPHER_MODE_AES_CTR
;
854 crypto_ablkcipher_set_flags(cipher
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
855 ablkctx
->enckey_len
= 0;
859 static void ctr_add_iv(u8
*dstiv
, u8
*srciv
, u32 add
)
861 unsigned int size
= AES_BLOCK_SIZE
;
862 __be32
*b
= (__be32
*)(dstiv
+ size
);
865 memcpy(dstiv
, srciv
, AES_BLOCK_SIZE
);
866 for (; size
>= 4; size
-= 4) {
867 prev
= be32_to_cpu(*--b
);
877 static unsigned int adjust_ctr_overflow(u8
*iv
, u32 bytes
)
879 __be32
*b
= (__be32
*)(iv
+ AES_BLOCK_SIZE
);
881 u32 temp
= be32_to_cpu(*--b
);
884 c
= (u64
)temp
+ 1; // No of block can processed withou overflow
885 if ((bytes
/ AES_BLOCK_SIZE
) > c
)
886 bytes
= c
* AES_BLOCK_SIZE
;
890 static int chcr_update_tweak(struct ablkcipher_request
*req
, u8
*iv
)
892 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
893 struct chcr_context
*ctx
= crypto_ablkcipher_ctx(tfm
);
894 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
895 struct chcr_blkcipher_req_ctx
*reqctx
= ablkcipher_request_ctx(req
);
896 struct crypto_cipher
*cipher
;
901 cipher
= ablkctx
->aes_generic
;
902 memcpy(iv
, req
->info
, AES_BLOCK_SIZE
);
904 keylen
= ablkctx
->enckey_len
/ 2;
905 key
= ablkctx
->key
+ keylen
;
906 ret
= crypto_cipher_setkey(cipher
, key
, keylen
);
910 crypto_cipher_encrypt_one(cipher
, iv
, iv
);
911 for (i
= 0; i
< (reqctx
->processed
/ AES_BLOCK_SIZE
); i
++)
912 gf128mul_x_ble((le128
*)iv
, (le128
*)iv
);
914 crypto_cipher_decrypt_one(cipher
, iv
, iv
);
919 static int chcr_update_cipher_iv(struct ablkcipher_request
*req
,
920 struct cpl_fw6_pld
*fw6_pld
, u8
*iv
)
922 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
923 struct chcr_blkcipher_req_ctx
*reqctx
= ablkcipher_request_ctx(req
);
924 int subtype
= get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm
));
927 if (subtype
== CRYPTO_ALG_SUB_TYPE_CTR
)
928 ctr_add_iv(iv
, req
->info
, (reqctx
->processed
/
930 else if (subtype
== CRYPTO_ALG_SUB_TYPE_CTR_RFC3686
)
931 *(__be32
*)(reqctx
->iv
+ CTR_RFC3686_NONCE_SIZE
+
932 CTR_RFC3686_IV_SIZE
) = cpu_to_be32((reqctx
->processed
/
933 AES_BLOCK_SIZE
) + 1);
934 else if (subtype
== CRYPTO_ALG_SUB_TYPE_XTS
)
935 ret
= chcr_update_tweak(req
, iv
);
936 else if (subtype
== CRYPTO_ALG_SUB_TYPE_CBC
) {
938 sg_pcopy_to_buffer(req
->src
, sg_nents(req
->src
), iv
,
940 reqctx
->processed
- AES_BLOCK_SIZE
);
942 memcpy(iv
, &fw6_pld
->data
[2], AES_BLOCK_SIZE
);
949 /* We need separate function for final iv because in rfc3686 Initial counter
950 * starts from 1 and buffer size of iv is 8 byte only which remains constant
951 * for subsequent update requests
954 static int chcr_final_cipher_iv(struct ablkcipher_request
*req
,
955 struct cpl_fw6_pld
*fw6_pld
, u8
*iv
)
957 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
958 struct chcr_blkcipher_req_ctx
*reqctx
= ablkcipher_request_ctx(req
);
959 int subtype
= get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm
));
962 if (subtype
== CRYPTO_ALG_SUB_TYPE_CTR
)
963 ctr_add_iv(iv
, req
->info
, (reqctx
->processed
/
965 else if (subtype
== CRYPTO_ALG_SUB_TYPE_XTS
)
966 ret
= chcr_update_tweak(req
, iv
);
967 else if (subtype
== CRYPTO_ALG_SUB_TYPE_CBC
) {
969 sg_pcopy_to_buffer(req
->src
, sg_nents(req
->src
), iv
,
971 reqctx
->processed
- AES_BLOCK_SIZE
);
973 memcpy(iv
, &fw6_pld
->data
[2], AES_BLOCK_SIZE
);
981 static int chcr_handle_cipher_resp(struct ablkcipher_request
*req
,
982 unsigned char *input
, int err
)
984 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
985 struct chcr_context
*ctx
= crypto_ablkcipher_ctx(tfm
);
986 struct uld_ctx
*u_ctx
= ULD_CTX(ctx
);
987 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
989 struct cpl_fw6_pld
*fw6_pld
= (struct cpl_fw6_pld
*)input
;
990 struct chcr_blkcipher_req_ctx
*reqctx
= ablkcipher_request_ctx(req
);
991 struct cipher_wr_param wrparam
;
994 dma_unmap_sg(&u_ctx
->lldi
.pdev
->dev
, reqctx
->dst
, reqctx
->dst_nents
,
998 kfree_skb(reqctx
->skb
);
1004 if (req
->nbytes
== reqctx
->processed
) {
1005 err
= chcr_final_cipher_iv(req
, fw6_pld
, req
->info
);
1009 if (unlikely(cxgb4_is_crypto_q_full(u_ctx
->lldi
.ports
[0],
1011 if (!(req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
)) {
1017 wrparam
.srcsg
= scatterwalk_ffwd(reqctx
->srcffwd
, req
->src
,
1019 reqctx
->dst
= scatterwalk_ffwd(reqctx
->dstffwd
, reqctx
->dstsg
,
1021 if (!wrparam
.srcsg
|| !reqctx
->dst
) {
1022 pr_err("Input sg list length less that nbytes\n");
1026 bytes
= chcr_sg_ent_in_wr(wrparam
.srcsg
, reqctx
->dst
, 1,
1027 SPACE_LEFT(ablkctx
->enckey_len
),
1028 &wrparam
.snent
, &reqctx
->dst_nents
);
1029 if ((bytes
+ reqctx
->processed
) >= req
->nbytes
)
1030 bytes
= req
->nbytes
- reqctx
->processed
;
1032 bytes
= ROUND_16(bytes
);
1033 err
= chcr_update_cipher_iv(req
, fw6_pld
, reqctx
->iv
);
1037 if (unlikely(bytes
== 0)) {
1038 err
= chcr_cipher_fallback(ablkctx
->sw_cipher
,
1042 req
->nbytes
- reqctx
->processed
,
1048 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm
)) ==
1049 CRYPTO_ALG_SUB_TYPE_CTR
)
1050 bytes
= adjust_ctr_overflow(reqctx
->iv
, bytes
);
1051 reqctx
->processed
+= bytes
;
1052 wrparam
.qid
= u_ctx
->lldi
.rxq_ids
[ctx
->rx_qidx
];
1054 wrparam
.bytes
= bytes
;
1055 skb
= create_cipher_wr(&wrparam
);
1057 pr_err("chcr : %s : Failed to form WR. No memory\n", __func__
);
1061 skb
->dev
= u_ctx
->lldi
.ports
[0];
1062 set_wr_txq(skb
, CPL_PRIORITY_DATA
, ctx
->tx_qidx
);
1066 free_new_sg(reqctx
->newdstsg
);
1067 reqctx
->newdstsg
= NULL
;
1068 req
->base
.complete(&req
->base
, err
);
1072 static int process_cipher(struct ablkcipher_request
*req
,
1074 struct sk_buff
**skb
,
1075 unsigned short op_type
)
1077 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
1078 unsigned int ivsize
= crypto_ablkcipher_ivsize(tfm
);
1079 struct chcr_blkcipher_req_ctx
*reqctx
= ablkcipher_request_ctx(req
);
1080 struct chcr_context
*ctx
= crypto_ablkcipher_ctx(tfm
);
1081 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
1082 struct cipher_wr_param wrparam
;
1083 int bytes
, nents
, err
= -EINVAL
;
1085 reqctx
->newdstsg
= NULL
;
1086 reqctx
->processed
= 0;
1089 if ((ablkctx
->enckey_len
== 0) || (ivsize
> AES_BLOCK_SIZE
) ||
1090 (req
->nbytes
== 0) ||
1091 (req
->nbytes
% crypto_ablkcipher_blocksize(tfm
))) {
1092 pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1093 ablkctx
->enckey_len
, req
->nbytes
, ivsize
);
1096 wrparam
.srcsg
= req
->src
;
1097 if (is_newsg(req
->dst
, &nents
)) {
1098 reqctx
->newdstsg
= alloc_new_sg(req
->dst
, nents
);
1099 if (IS_ERR(reqctx
->newdstsg
))
1100 return PTR_ERR(reqctx
->newdstsg
);
1101 reqctx
->dstsg
= reqctx
->newdstsg
;
1103 reqctx
->dstsg
= req
->dst
;
1105 bytes
= chcr_sg_ent_in_wr(wrparam
.srcsg
, reqctx
->dstsg
, MIN_CIPHER_SG
,
1106 SPACE_LEFT(ablkctx
->enckey_len
),
1108 &reqctx
->dst_nents
);
1109 if ((bytes
+ reqctx
->processed
) >= req
->nbytes
)
1110 bytes
= req
->nbytes
- reqctx
->processed
;
1112 bytes
= ROUND_16(bytes
);
1113 if (unlikely(bytes
> req
->nbytes
))
1114 bytes
= req
->nbytes
;
1115 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm
)) ==
1116 CRYPTO_ALG_SUB_TYPE_CTR
) {
1117 bytes
= adjust_ctr_overflow(req
->info
, bytes
);
1119 if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm
)) ==
1120 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686
) {
1121 memcpy(reqctx
->iv
, ablkctx
->nonce
, CTR_RFC3686_NONCE_SIZE
);
1122 memcpy(reqctx
->iv
+ CTR_RFC3686_NONCE_SIZE
, req
->info
,
1123 CTR_RFC3686_IV_SIZE
);
1125 /* initialize counter portion of counter block */
1126 *(__be32
*)(reqctx
->iv
+ CTR_RFC3686_NONCE_SIZE
+
1127 CTR_RFC3686_IV_SIZE
) = cpu_to_be32(1);
1131 memcpy(reqctx
->iv
, req
->info
, ivsize
);
1133 if (unlikely(bytes
== 0)) {
1134 err
= chcr_cipher_fallback(ablkctx
->sw_cipher
,
1143 reqctx
->processed
= bytes
;
1144 reqctx
->dst
= reqctx
->dstsg
;
1145 reqctx
->op
= op_type
;
1148 wrparam
.bytes
= bytes
;
1149 *skb
= create_cipher_wr(&wrparam
);
1151 err
= PTR_ERR(*skb
);
1157 free_new_sg(reqctx
->newdstsg
);
1158 reqctx
->newdstsg
= NULL
;
1162 static int chcr_aes_encrypt(struct ablkcipher_request
*req
)
1164 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
1165 struct chcr_context
*ctx
= crypto_ablkcipher_ctx(tfm
);
1166 struct sk_buff
*skb
= NULL
;
1168 struct uld_ctx
*u_ctx
= ULD_CTX(ctx
);
1170 if (unlikely(cxgb4_is_crypto_q_full(u_ctx
->lldi
.ports
[0],
1172 if (!(req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
1176 err
= process_cipher(req
, u_ctx
->lldi
.rxq_ids
[ctx
->rx_qidx
], &skb
,
1180 skb
->dev
= u_ctx
->lldi
.ports
[0];
1181 set_wr_txq(skb
, CPL_PRIORITY_DATA
, ctx
->tx_qidx
);
1183 return -EINPROGRESS
;
1186 static int chcr_aes_decrypt(struct ablkcipher_request
*req
)
1188 struct crypto_ablkcipher
*tfm
= crypto_ablkcipher_reqtfm(req
);
1189 struct chcr_context
*ctx
= crypto_ablkcipher_ctx(tfm
);
1190 struct uld_ctx
*u_ctx
= ULD_CTX(ctx
);
1191 struct sk_buff
*skb
= NULL
;
1194 if (unlikely(cxgb4_is_crypto_q_full(u_ctx
->lldi
.ports
[0],
1196 if (!(req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
1200 err
= process_cipher(req
, u_ctx
->lldi
.rxq_ids
[ctx
->rx_qidx
], &skb
,
1204 skb
->dev
= u_ctx
->lldi
.ports
[0];
1205 set_wr_txq(skb
, CPL_PRIORITY_DATA
, ctx
->tx_qidx
);
1207 return -EINPROGRESS
;
1210 static int chcr_device_init(struct chcr_context
*ctx
)
1212 struct uld_ctx
*u_ctx
= NULL
;
1213 struct adapter
*adap
;
1215 int txq_perchan
, txq_idx
, ntxq
;
1216 int err
= 0, rxq_perchan
, rxq_idx
;
1218 id
= smp_processor_id();
1220 u_ctx
= assign_chcr_device();
1222 pr_err("chcr device assignment fails\n");
1225 ctx
->dev
= u_ctx
->dev
;
1226 adap
= padap(ctx
->dev
);
1227 ntxq
= min_not_zero((unsigned int)u_ctx
->lldi
.nrxq
,
1228 adap
->vres
.ncrypto_fc
);
1229 rxq_perchan
= u_ctx
->lldi
.nrxq
/ u_ctx
->lldi
.nchan
;
1230 txq_perchan
= ntxq
/ u_ctx
->lldi
.nchan
;
1231 rxq_idx
= ctx
->dev
->tx_channel_id
* rxq_perchan
;
1232 rxq_idx
+= id
% rxq_perchan
;
1233 txq_idx
= ctx
->dev
->tx_channel_id
* txq_perchan
;
1234 txq_idx
+= id
% txq_perchan
;
1235 spin_lock(&ctx
->dev
->lock_chcr_dev
);
1236 ctx
->rx_qidx
= rxq_idx
;
1237 ctx
->tx_qidx
= txq_idx
;
1238 ctx
->dev
->tx_channel_id
= !ctx
->dev
->tx_channel_id
;
1239 ctx
->dev
->rx_channel_id
= 0;
1240 spin_unlock(&ctx
->dev
->lock_chcr_dev
);
1246 static int chcr_cra_init(struct crypto_tfm
*tfm
)
1248 struct crypto_alg
*alg
= tfm
->__crt_alg
;
1249 struct chcr_context
*ctx
= crypto_tfm_ctx(tfm
);
1250 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
1252 ablkctx
->sw_cipher
= crypto_alloc_skcipher(alg
->cra_name
, 0,
1253 CRYPTO_ALG_ASYNC
| CRYPTO_ALG_NEED_FALLBACK
);
1254 if (IS_ERR(ablkctx
->sw_cipher
)) {
1255 pr_err("failed to allocate fallback for %s\n", alg
->cra_name
);
1256 return PTR_ERR(ablkctx
->sw_cipher
);
1259 if (get_cryptoalg_subtype(tfm
) == CRYPTO_ALG_SUB_TYPE_XTS
) {
1260 /* To update tweak*/
1261 ablkctx
->aes_generic
= crypto_alloc_cipher("aes-generic", 0, 0);
1262 if (IS_ERR(ablkctx
->aes_generic
)) {
1263 pr_err("failed to allocate aes cipher for tweak\n");
1264 return PTR_ERR(ablkctx
->aes_generic
);
1267 ablkctx
->aes_generic
= NULL
;
1269 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct chcr_blkcipher_req_ctx
);
1270 return chcr_device_init(crypto_tfm_ctx(tfm
));
1273 static int chcr_rfc3686_init(struct crypto_tfm
*tfm
)
1275 struct crypto_alg
*alg
= tfm
->__crt_alg
;
1276 struct chcr_context
*ctx
= crypto_tfm_ctx(tfm
);
1277 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
1279 /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
1280 * cannot be used as fallback in chcr_handle_cipher_response
1282 ablkctx
->sw_cipher
= crypto_alloc_skcipher("ctr(aes)", 0,
1283 CRYPTO_ALG_ASYNC
| CRYPTO_ALG_NEED_FALLBACK
);
1284 if (IS_ERR(ablkctx
->sw_cipher
)) {
1285 pr_err("failed to allocate fallback for %s\n", alg
->cra_name
);
1286 return PTR_ERR(ablkctx
->sw_cipher
);
1288 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct chcr_blkcipher_req_ctx
);
1289 return chcr_device_init(crypto_tfm_ctx(tfm
));
1293 static void chcr_cra_exit(struct crypto_tfm
*tfm
)
1295 struct chcr_context
*ctx
= crypto_tfm_ctx(tfm
);
1296 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
1298 crypto_free_skcipher(ablkctx
->sw_cipher
);
1299 if (ablkctx
->aes_generic
)
1300 crypto_free_cipher(ablkctx
->aes_generic
);
1303 static int get_alg_config(struct algo_param
*params
,
1304 unsigned int auth_size
)
1306 switch (auth_size
) {
1307 case SHA1_DIGEST_SIZE
:
1308 params
->mk_size
= CHCR_KEYCTX_MAC_KEY_SIZE_160
;
1309 params
->auth_mode
= CHCR_SCMD_AUTH_MODE_SHA1
;
1310 params
->result_size
= SHA1_DIGEST_SIZE
;
1312 case SHA224_DIGEST_SIZE
:
1313 params
->mk_size
= CHCR_KEYCTX_MAC_KEY_SIZE_256
;
1314 params
->auth_mode
= CHCR_SCMD_AUTH_MODE_SHA224
;
1315 params
->result_size
= SHA256_DIGEST_SIZE
;
1317 case SHA256_DIGEST_SIZE
:
1318 params
->mk_size
= CHCR_KEYCTX_MAC_KEY_SIZE_256
;
1319 params
->auth_mode
= CHCR_SCMD_AUTH_MODE_SHA256
;
1320 params
->result_size
= SHA256_DIGEST_SIZE
;
1322 case SHA384_DIGEST_SIZE
:
1323 params
->mk_size
= CHCR_KEYCTX_MAC_KEY_SIZE_512
;
1324 params
->auth_mode
= CHCR_SCMD_AUTH_MODE_SHA512_384
;
1325 params
->result_size
= SHA512_DIGEST_SIZE
;
1327 case SHA512_DIGEST_SIZE
:
1328 params
->mk_size
= CHCR_KEYCTX_MAC_KEY_SIZE_512
;
1329 params
->auth_mode
= CHCR_SCMD_AUTH_MODE_SHA512_512
;
1330 params
->result_size
= SHA512_DIGEST_SIZE
;
1333 pr_err("chcr : ERROR, unsupported digest size\n");
1339 static inline void chcr_free_shash(struct crypto_shash
*base_hash
)
1341 crypto_free_shash(base_hash
);
1345 * create_hash_wr - Create hash work request
1346 * @req - Cipher req base
1348 static struct sk_buff
*create_hash_wr(struct ahash_request
*req
,
1349 struct hash_wr_param
*param
)
1351 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(req
);
1352 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
1353 struct chcr_context
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
1354 struct hmac_ctx
*hmacctx
= HMAC_CTX(ctx
);
1355 struct sk_buff
*skb
= NULL
;
1356 struct chcr_wr
*chcr_req
;
1357 unsigned int frags
= 0, transhdr_len
, iopad_alignment
= 0;
1358 unsigned int digestsize
= crypto_ahash_digestsize(tfm
);
1359 unsigned int kctx_len
= 0;
1360 u8 hash_size_in_response
= 0;
1361 gfp_t flags
= req
->base
.flags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
1363 struct adapter
*adap
= padap(ctx
->dev
);
1365 iopad_alignment
= KEYCTX_ALIGN_PAD(digestsize
);
1366 kctx_len
= param
->alg_prm
.result_size
+ iopad_alignment
;
1367 if (param
->opad_needed
)
1368 kctx_len
+= param
->alg_prm
.result_size
+ iopad_alignment
;
1370 if (req_ctx
->result
)
1371 hash_size_in_response
= digestsize
;
1373 hash_size_in_response
= param
->alg_prm
.result_size
;
1374 transhdr_len
= HASH_TRANSHDR_SIZE(kctx_len
);
1375 skb
= alloc_skb((transhdr_len
+ sizeof(struct sge_opaque_hdr
)), flags
);
1379 skb_reserve(skb
, sizeof(struct sge_opaque_hdr
));
1380 chcr_req
= __skb_put_zero(skb
, transhdr_len
);
1382 chcr_req
->sec_cpl
.op_ivinsrtofst
=
1383 FILL_SEC_CPL_OP_IVINSR(ctx
->dev
->rx_channel_id
, 2, 0);
1384 chcr_req
->sec_cpl
.pldlen
= htonl(param
->bfr_len
+ param
->sg_len
);
1386 chcr_req
->sec_cpl
.aadstart_cipherstop_hi
=
1387 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1388 chcr_req
->sec_cpl
.cipherstop_lo_authinsert
=
1389 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1390 chcr_req
->sec_cpl
.seqno_numivs
=
1391 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param
->alg_prm
.auth_mode
,
1392 param
->opad_needed
, 0);
1394 chcr_req
->sec_cpl
.ivgen_hdrlen
=
1395 FILL_SEC_CPL_IVGEN_HDRLEN(param
->last
, param
->more
, 0, 1, 0, 0);
1397 memcpy(chcr_req
->key_ctx
.key
, req_ctx
->partial_hash
,
1398 param
->alg_prm
.result_size
);
1400 if (param
->opad_needed
)
1401 memcpy(chcr_req
->key_ctx
.key
+
1402 ((param
->alg_prm
.result_size
<= 32) ? 32 :
1403 CHCR_HASH_MAX_DIGEST_SIZE
),
1404 hmacctx
->opad
, param
->alg_prm
.result_size
);
1406 chcr_req
->key_ctx
.ctx_hdr
= FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY
,
1407 param
->alg_prm
.mk_size
, 0,
1410 sizeof(chcr_req
->key_ctx
)) >> 4));
1411 chcr_req
->sec_cpl
.scmd1
= cpu_to_be64((u64
)param
->scmd1
);
1413 skb_set_transport_header(skb
, transhdr_len
);
1414 if (param
->bfr_len
!= 0)
1415 write_buffer_to_skb(skb
, &frags
, req_ctx
->reqbfr
,
1417 if (param
->sg_len
!= 0)
1418 write_sg_to_skb(skb
, &frags
, req
->src
, param
->sg_len
);
1419 atomic_inc(&adap
->chcr_stats
.digest_rqst
);
1420 create_wreq(ctx
, chcr_req
, &req
->base
, skb
, kctx_len
,
1421 hash_size_in_response
, 0, DUMMY_BYTES
, 0);
1427 static int chcr_ahash_update(struct ahash_request
*req
)
1429 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(req
);
1430 struct crypto_ahash
*rtfm
= crypto_ahash_reqtfm(req
);
1431 struct chcr_context
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(rtfm
));
1432 struct uld_ctx
*u_ctx
= NULL
;
1433 struct sk_buff
*skb
;
1434 u8 remainder
= 0, bs
;
1435 unsigned int nbytes
= req
->nbytes
;
1436 struct hash_wr_param params
;
1438 bs
= crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm
));
1440 u_ctx
= ULD_CTX(ctx
);
1441 if (unlikely(cxgb4_is_crypto_q_full(u_ctx
->lldi
.ports
[0],
1443 if (!(req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
1447 if (nbytes
+ req_ctx
->reqlen
>= bs
) {
1448 remainder
= (nbytes
+ req_ctx
->reqlen
) % bs
;
1449 nbytes
= nbytes
+ req_ctx
->reqlen
- remainder
;
1451 sg_pcopy_to_buffer(req
->src
, sg_nents(req
->src
), req_ctx
->reqbfr
1452 + req_ctx
->reqlen
, nbytes
, 0);
1453 req_ctx
->reqlen
+= nbytes
;
1457 params
.opad_needed
= 0;
1460 params
.sg_len
= nbytes
- req_ctx
->reqlen
;
1461 params
.bfr_len
= req_ctx
->reqlen
;
1463 get_alg_config(¶ms
.alg_prm
, crypto_ahash_digestsize(rtfm
));
1464 req_ctx
->result
= 0;
1465 req_ctx
->data_len
+= params
.sg_len
+ params
.bfr_len
;
1466 skb
= create_hash_wr(req
, ¶ms
);
1473 temp
= req_ctx
->reqbfr
;
1474 req_ctx
->reqbfr
= req_ctx
->skbfr
;
1475 req_ctx
->skbfr
= temp
;
1476 sg_pcopy_to_buffer(req
->src
, sg_nents(req
->src
),
1477 req_ctx
->reqbfr
, remainder
, req
->nbytes
-
1480 req_ctx
->reqlen
= remainder
;
1481 skb
->dev
= u_ctx
->lldi
.ports
[0];
1482 set_wr_txq(skb
, CPL_PRIORITY_DATA
, ctx
->tx_qidx
);
1485 return -EINPROGRESS
;
1488 static void create_last_hash_block(char *bfr_ptr
, unsigned int bs
, u64 scmd1
)
1490 memset(bfr_ptr
, 0, bs
);
1493 *(__be64
*)(bfr_ptr
+ 56) = cpu_to_be64(scmd1
<< 3);
1495 *(__be64
*)(bfr_ptr
+ 120) = cpu_to_be64(scmd1
<< 3);
1498 static int chcr_ahash_final(struct ahash_request
*req
)
1500 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(req
);
1501 struct crypto_ahash
*rtfm
= crypto_ahash_reqtfm(req
);
1502 struct chcr_context
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(rtfm
));
1503 struct hash_wr_param params
;
1504 struct sk_buff
*skb
;
1505 struct uld_ctx
*u_ctx
= NULL
;
1506 u8 bs
= crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm
));
1508 u_ctx
= ULD_CTX(ctx
);
1509 if (is_hmac(crypto_ahash_tfm(rtfm
)))
1510 params
.opad_needed
= 1;
1512 params
.opad_needed
= 0;
1514 get_alg_config(¶ms
.alg_prm
, crypto_ahash_digestsize(rtfm
));
1515 req_ctx
->result
= 1;
1516 params
.bfr_len
= req_ctx
->reqlen
;
1517 req_ctx
->data_len
+= params
.bfr_len
+ params
.sg_len
;
1518 if (req_ctx
->reqlen
== 0) {
1519 create_last_hash_block(req_ctx
->reqbfr
, bs
, req_ctx
->data_len
);
1523 params
.bfr_len
= bs
;
1526 params
.scmd1
= req_ctx
->data_len
;
1530 skb
= create_hash_wr(req
, ¶ms
);
1534 skb
->dev
= u_ctx
->lldi
.ports
[0];
1535 set_wr_txq(skb
, CPL_PRIORITY_DATA
, ctx
->tx_qidx
);
1537 return -EINPROGRESS
;
1540 static int chcr_ahash_finup(struct ahash_request
*req
)
1542 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(req
);
1543 struct crypto_ahash
*rtfm
= crypto_ahash_reqtfm(req
);
1544 struct chcr_context
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(rtfm
));
1545 struct uld_ctx
*u_ctx
= NULL
;
1546 struct sk_buff
*skb
;
1547 struct hash_wr_param params
;
1550 bs
= crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm
));
1551 u_ctx
= ULD_CTX(ctx
);
1553 if (unlikely(cxgb4_is_crypto_q_full(u_ctx
->lldi
.ports
[0],
1555 if (!(req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
1559 if (is_hmac(crypto_ahash_tfm(rtfm
)))
1560 params
.opad_needed
= 1;
1562 params
.opad_needed
= 0;
1564 params
.sg_len
= req
->nbytes
;
1565 params
.bfr_len
= req_ctx
->reqlen
;
1566 get_alg_config(¶ms
.alg_prm
, crypto_ahash_digestsize(rtfm
));
1567 req_ctx
->data_len
+= params
.bfr_len
+ params
.sg_len
;
1568 req_ctx
->result
= 1;
1569 if ((req_ctx
->reqlen
+ req
->nbytes
) == 0) {
1570 create_last_hash_block(req_ctx
->reqbfr
, bs
, req_ctx
->data_len
);
1574 params
.bfr_len
= bs
;
1576 params
.scmd1
= req_ctx
->data_len
;
1581 skb
= create_hash_wr(req
, ¶ms
);
1585 skb
->dev
= u_ctx
->lldi
.ports
[0];
1586 set_wr_txq(skb
, CPL_PRIORITY_DATA
, ctx
->tx_qidx
);
1589 return -EINPROGRESS
;
1592 static int chcr_ahash_digest(struct ahash_request
*req
)
1594 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(req
);
1595 struct crypto_ahash
*rtfm
= crypto_ahash_reqtfm(req
);
1596 struct chcr_context
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(rtfm
));
1597 struct uld_ctx
*u_ctx
= NULL
;
1598 struct sk_buff
*skb
;
1599 struct hash_wr_param params
;
1603 bs
= crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm
));
1605 u_ctx
= ULD_CTX(ctx
);
1606 if (unlikely(cxgb4_is_crypto_q_full(u_ctx
->lldi
.ports
[0],
1608 if (!(req
->base
.flags
& CRYPTO_TFM_REQ_MAY_BACKLOG
))
1612 if (is_hmac(crypto_ahash_tfm(rtfm
)))
1613 params
.opad_needed
= 1;
1615 params
.opad_needed
= 0;
1619 params
.sg_len
= req
->nbytes
;
1622 get_alg_config(¶ms
.alg_prm
, crypto_ahash_digestsize(rtfm
));
1623 req_ctx
->result
= 1;
1624 req_ctx
->data_len
+= params
.bfr_len
+ params
.sg_len
;
1626 if (req
->nbytes
== 0) {
1627 create_last_hash_block(req_ctx
->reqbfr
, bs
, 0);
1629 params
.bfr_len
= bs
;
1632 skb
= create_hash_wr(req
, ¶ms
);
1636 skb
->dev
= u_ctx
->lldi
.ports
[0];
1637 set_wr_txq(skb
, CPL_PRIORITY_DATA
, ctx
->tx_qidx
);
1639 return -EINPROGRESS
;
1642 static int chcr_ahash_export(struct ahash_request
*areq
, void *out
)
1644 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1645 struct chcr_ahash_req_ctx
*state
= out
;
1647 state
->reqlen
= req_ctx
->reqlen
;
1648 state
->data_len
= req_ctx
->data_len
;
1649 memcpy(state
->bfr1
, req_ctx
->reqbfr
, req_ctx
->reqlen
);
1650 memcpy(state
->partial_hash
, req_ctx
->partial_hash
,
1651 CHCR_HASH_MAX_DIGEST_SIZE
);
1655 static int chcr_ahash_import(struct ahash_request
*areq
, const void *in
)
1657 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1658 struct chcr_ahash_req_ctx
*state
= (struct chcr_ahash_req_ctx
*)in
;
1660 req_ctx
->reqlen
= state
->reqlen
;
1661 req_ctx
->data_len
= state
->data_len
;
1662 req_ctx
->reqbfr
= req_ctx
->bfr1
;
1663 req_ctx
->skbfr
= req_ctx
->bfr2
;
1664 memcpy(req_ctx
->bfr1
, state
->bfr1
, CHCR_HASH_MAX_BLOCK_SIZE_128
);
1665 memcpy(req_ctx
->partial_hash
, state
->partial_hash
,
1666 CHCR_HASH_MAX_DIGEST_SIZE
);
1670 static int chcr_ahash_setkey(struct crypto_ahash
*tfm
, const u8
*key
,
1671 unsigned int keylen
)
1673 struct chcr_context
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
1674 struct hmac_ctx
*hmacctx
= HMAC_CTX(ctx
);
1675 unsigned int digestsize
= crypto_ahash_digestsize(tfm
);
1676 unsigned int bs
= crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
1677 unsigned int i
, err
= 0, updated_digestsize
;
1679 SHASH_DESC_ON_STACK(shash
, hmacctx
->base_hash
);
1681 /* use the key to calculate the ipad and opad. ipad will sent with the
1682 * first request's data. opad will be sent with the final hash result
1683 * ipad in hmacctx->ipad and opad in hmacctx->opad location
1685 shash
->tfm
= hmacctx
->base_hash
;
1686 shash
->flags
= crypto_shash_get_flags(hmacctx
->base_hash
);
1688 err
= crypto_shash_digest(shash
, key
, keylen
,
1692 keylen
= digestsize
;
1694 memcpy(hmacctx
->ipad
, key
, keylen
);
1696 memset(hmacctx
->ipad
+ keylen
, 0, bs
- keylen
);
1697 memcpy(hmacctx
->opad
, hmacctx
->ipad
, bs
);
1699 for (i
= 0; i
< bs
/ sizeof(int); i
++) {
1700 *((unsigned int *)(&hmacctx
->ipad
) + i
) ^= IPAD_DATA
;
1701 *((unsigned int *)(&hmacctx
->opad
) + i
) ^= OPAD_DATA
;
1704 updated_digestsize
= digestsize
;
1705 if (digestsize
== SHA224_DIGEST_SIZE
)
1706 updated_digestsize
= SHA256_DIGEST_SIZE
;
1707 else if (digestsize
== SHA384_DIGEST_SIZE
)
1708 updated_digestsize
= SHA512_DIGEST_SIZE
;
1709 err
= chcr_compute_partial_hash(shash
, hmacctx
->ipad
,
1710 hmacctx
->ipad
, digestsize
);
1713 chcr_change_order(hmacctx
->ipad
, updated_digestsize
);
1715 err
= chcr_compute_partial_hash(shash
, hmacctx
->opad
,
1716 hmacctx
->opad
, digestsize
);
1719 chcr_change_order(hmacctx
->opad
, updated_digestsize
);
1724 static int chcr_aes_xts_setkey(struct crypto_ablkcipher
*cipher
, const u8
*key
,
1725 unsigned int key_len
)
1727 struct chcr_context
*ctx
= crypto_ablkcipher_ctx(cipher
);
1728 struct ablk_ctx
*ablkctx
= ABLK_CTX(ctx
);
1729 unsigned short context_size
= 0;
1732 err
= chcr_cipher_fallback_setkey(cipher
, key
, key_len
);
1736 memcpy(ablkctx
->key
, key
, key_len
);
1737 ablkctx
->enckey_len
= key_len
;
1738 get_aes_decrypt_key(ablkctx
->rrkey
, ablkctx
->key
, key_len
<< 2);
1739 context_size
= (KEY_CONTEXT_HDR_SALT_AND_PAD
+ key_len
) >> 4;
1740 ablkctx
->key_ctx_hdr
=
1741 FILL_KEY_CTX_HDR((key_len
== AES_KEYSIZE_256
) ?
1742 CHCR_KEYCTX_CIPHER_KEY_SIZE_128
:
1743 CHCR_KEYCTX_CIPHER_KEY_SIZE_256
,
1744 CHCR_KEYCTX_NO_KEY
, 1,
1746 ablkctx
->ciph_mode
= CHCR_SCMD_CIPHER_MODE_AES_XTS
;
1749 crypto_ablkcipher_set_flags(cipher
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
1750 ablkctx
->enckey_len
= 0;
1755 static int chcr_sha_init(struct ahash_request
*areq
)
1757 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1758 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1759 int digestsize
= crypto_ahash_digestsize(tfm
);
1761 req_ctx
->data_len
= 0;
1762 req_ctx
->reqlen
= 0;
1763 req_ctx
->reqbfr
= req_ctx
->bfr1
;
1764 req_ctx
->skbfr
= req_ctx
->bfr2
;
1765 req_ctx
->skb
= NULL
;
1766 req_ctx
->result
= 0;
1767 copy_hash_init_values(req_ctx
->partial_hash
, digestsize
);
1771 static int chcr_sha_cra_init(struct crypto_tfm
*tfm
)
1773 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
1774 sizeof(struct chcr_ahash_req_ctx
));
1775 return chcr_device_init(crypto_tfm_ctx(tfm
));
1778 static int chcr_hmac_init(struct ahash_request
*areq
)
1780 struct chcr_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1781 struct crypto_ahash
*rtfm
= crypto_ahash_reqtfm(areq
);
1782 struct chcr_context
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(rtfm
));
1783 struct hmac_ctx
*hmacctx
= HMAC_CTX(ctx
);
1784 unsigned int digestsize
= crypto_ahash_digestsize(rtfm
);
1785 unsigned int bs
= crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm
));
1787 chcr_sha_init(areq
);
1788 req_ctx
->data_len
= bs
;
1789 if (is_hmac(crypto_ahash_tfm(rtfm
))) {
1790 if (digestsize
== SHA224_DIGEST_SIZE
)
1791 memcpy(req_ctx
->partial_hash
, hmacctx
->ipad
,
1792 SHA256_DIGEST_SIZE
);
1793 else if (digestsize
== SHA384_DIGEST_SIZE
)
1794 memcpy(req_ctx
->partial_hash
, hmacctx
->ipad
,
1795 SHA512_DIGEST_SIZE
);
1797 memcpy(req_ctx
->partial_hash
, hmacctx
->ipad
,
1803 static int chcr_hmac_cra_init(struct crypto_tfm
*tfm
)
1805 struct chcr_context
*ctx
= crypto_tfm_ctx(tfm
);
1806 struct hmac_ctx
*hmacctx
= HMAC_CTX(ctx
);
1807 unsigned int digestsize
=
1808 crypto_ahash_digestsize(__crypto_ahash_cast(tfm
));
1810 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
1811 sizeof(struct chcr_ahash_req_ctx
));
1812 hmacctx
->base_hash
= chcr_alloc_shash(digestsize
);
1813 if (IS_ERR(hmacctx
->base_hash
))
1814 return PTR_ERR(hmacctx
->base_hash
);
1815 return chcr_device_init(crypto_tfm_ctx(tfm
));
1818 static void chcr_hmac_cra_exit(struct crypto_tfm
*tfm
)
1820 struct chcr_context
*ctx
= crypto_tfm_ctx(tfm
);
1821 struct hmac_ctx
*hmacctx
= HMAC_CTX(ctx
);
1823 if (hmacctx
->base_hash
) {
1824 chcr_free_shash(hmacctx
->base_hash
);
1825 hmacctx
->base_hash
= NULL
;
1829 static int is_newsg(struct scatterlist
*sgl
, unsigned int *newents
)
1835 if (sgl
->length
> CHCR_SG_SIZE
)
1837 nents
+= DIV_ROUND_UP(sgl
->length
, CHCR_SG_SIZE
);
1844 static inline void free_new_sg(struct scatterlist
*sgl
)
1849 static struct scatterlist
*alloc_new_sg(struct scatterlist
*sgl
,
1852 struct scatterlist
*newsg
, *sg
;
1853 int i
, len
, processed
= 0;
1857 newsg
= kmalloc_array(nents
, sizeof(struct scatterlist
), GFP_KERNEL
);
1859 return ERR_PTR(-ENOMEM
);
1861 sg_init_table(sg
, nents
);
1862 offset
= sgl
->offset
;
1863 spage
= sg_page(sgl
);
1864 for (i
= 0; i
< nents
; i
++) {
1865 len
= min_t(u32
, sgl
->length
- processed
, CHCR_SG_SIZE
);
1866 sg_set_page(sg
, spage
, len
, offset
);
1869 if (offset
>= PAGE_SIZE
) {
1870 offset
= offset
% PAGE_SIZE
;
1873 if (processed
== sgl
->length
) {
1878 spage
= sg_page(sgl
);
1879 offset
= sgl
->offset
;
1886 static int chcr_copy_assoc(struct aead_request
*req
,
1887 struct chcr_aead_ctx
*ctx
)
1889 SKCIPHER_REQUEST_ON_STACK(skreq
, ctx
->null
);
1891 skcipher_request_set_tfm(skreq
, ctx
->null
);
1892 skcipher_request_set_callback(skreq
, aead_request_flags(req
),
1894 skcipher_request_set_crypt(skreq
, req
->src
, req
->dst
, req
->assoclen
,
1897 return crypto_skcipher_encrypt(skreq
);
1899 static int chcr_aead_need_fallback(struct aead_request
*req
, int src_nent
,
1900 int aadmax
, int wrlen
,
1901 unsigned short op_type
)
1903 unsigned int authsize
= crypto_aead_authsize(crypto_aead_reqtfm(req
));
1905 if (((req
->cryptlen
- (op_type
? authsize
: 0)) == 0) ||
1906 (req
->assoclen
> aadmax
) ||
1907 (src_nent
> MAX_SKB_FRAGS
) ||
1908 (wrlen
> MAX_WR_SIZE
))
1913 static int chcr_aead_fallback(struct aead_request
*req
, unsigned short op_type
)
1915 struct crypto_aead
*tfm
= crypto_aead_reqtfm(req
);
1916 struct chcr_context
*ctx
= crypto_aead_ctx(tfm
);
1917 struct chcr_aead_ctx
*aeadctx
= AEAD_CTX(ctx
);
1918 struct aead_request
*subreq
= aead_request_ctx(req
);
1920 aead_request_set_tfm(subreq
, aeadctx
->sw_cipher
);
1921 aead_request_set_callback(subreq
, req
->base
.flags
,
1922 req
->base
.complete
, req
->base
.data
);
1923 aead_request_set_crypt(subreq
, req
->src
, req
->dst
, req
->cryptlen
,
1925 aead_request_set_ad(subreq
, req
->assoclen
);
1926 return op_type
? crypto_aead_decrypt(subreq
) :
1927 crypto_aead_encrypt(subreq
);
static struct sk_buff *create_authenc_wr(struct aead_request *req,
					 unsigned short qid,
					 int size,
					 unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct phys_sge_parm sg_param;
	struct scatterlist *src;
	unsigned int frags = 0, transhdr_len;
	unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
	unsigned int kctx_len = 0, nents;
	unsigned short stop_offset = 0;
	unsigned int assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL, src_nent;
	int null = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);

	reqctx->newdstsg = NULL;
	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
						    authsize);
	if (aeadctx->enckey_len == 0 || (req->cryptlen <= 0))
		goto err;
	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
		goto err;
	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
	if (src_nent < 0)
		goto err;
	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);

	if (req->src != req->dst) {
		error = chcr_copy_assoc(req, aeadctx);
		if (error)
			return ERR_PTR(error);
	}
	if (dst_size && is_newsg(req->dst, &nents)) {
		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
		if (IS_ERR(reqctx->newdstsg))
			return ERR_CAST(reqctx->newdstsg);
		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
					       reqctx->newdstsg, req->assoclen);
	} else {
		if (req->src == req->dst)
			reqctx->dst = src;
		else
			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
						       req->dst, req->assoclen);
	}
	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
		null = 1;
		assoclen = 0;
	}
	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
					     (op_type ? -authsize : authsize));
	if (reqctx->dst_nents < 0) {
		pr_err("AUTHENC:Invalid Destination sg entries\n");
		error = -EINVAL;
		goto err;
	}
	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
		- sizeof(chcr_req->key_ctx);
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	if (chcr_aead_need_fallback(req, src_nent + MIN_AUTH_SG,
				    T6_MAX_AAD_SIZE,
				    transhdr_len + (sgl_len(src_nent + MIN_AUTH_SG) * 8),
				    op_type)) {
		atomic_inc(&adap->chcr_stats.fallback);
		free_new_sg(reqctx->newdstsg);
		reqctx->newdstsg = NULL;
		return ERR_PTR(chcr_aead_fallback(req, op_type));
	}
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	/* LLD is going to write the sge hdr. */
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));

	chcr_req = __skb_put_zero(skb, transhdr_len);

	stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;

	/*
	 * Input order is AAD, IV and Payload, where IV should be included as
	 * the part of authdata. All other fields should be filled according
	 * to the hardware spec.
	 */
	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(ctx->dev->rx_channel_id, 2,
				       (ivsize ? (assoclen + 1) : 0));
	chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + ivsize + 1,
					(stop_offset & 0x1F0) >> 4);
	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
					stop_offset & 0xF,
					null ? 0 : assoclen + ivsize + 1,
					stop_offset, stop_offset);
	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_CBC,
					actx->auth_mode, aeadctx->hmac_ctrl,
					ivsize >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 1, dst_size);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	if (op_type == CHCR_ENCRYPT_OP)
		memcpy(chcr_req->key_ctx.key, aeadctx->key,
		       aeadctx->enckey_len);
	else
		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
		       aeadctx->enckey_len);

	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
					4), actx->h_iopad, kctx_len -
				(DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
	sg_param.qid = qid;
	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
				     reqctx->dst, &sg_param);
	if (error)
		goto dstmap_fail;

	skb_set_transport_header(skb, transhdr_len);

	if (assoclen)
		write_sg_to_skb(skb, &frags, req->src, assoclen);

	write_buffer_to_skb(skb, &frags, req->iv, ivsize);
	write_sg_to_skb(skb, &frags, src, req->cryptlen);
	atomic_inc(&adap->chcr_stats.cipher_rqst);
	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
		    sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
	reqctx->skb = skb;
	skb_get(skb);

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	free_new_sg(reqctx->newdstsg);
	reqctx->newdstsg = NULL;
	return ERR_PTR(error);
}
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (unsigned int)(1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
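/*
 * CCM B0 block (RFC 3610), built in generate_b0() below: byte 0 carries the
 * flags (bit 6 = Adata present, bits 3..5 = (M - 2) / 2 where M is the tag
 * length, bits 0..2 = L - 1), followed by the nonce, with the message length
 * stored big-endian in the last L bytes.
 */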
static void generate_b0(struct aead_request *req,
			struct chcr_aead_ctx *aeadctx,
			unsigned short op_type)
{
	unsigned int l, lp, m;
	int rc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	u8 *b0 = reqctx->scratch_pad;

	m = crypto_aead_authsize(aead);

	memcpy(b0, reqctx->iv, 16);

	lp = b0[0];
	l = lp + 1;

	/* set m, bits 3-5 */
	*b0 |= (8 * ((m - 2) / 2));

	/* set adata, bit 6, if associated data is used */
	if (req->assoclen)
		*b0 |= 64;
	rc = set_msg_len(b0 + 16 - l,
			 (op_type == CHCR_DECRYPT_OP) ?
			 req->cryptlen - m : req->cryptlen, l);
}
static inline int crypto_ccm_check_iv(const u8 *iv)
{
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (iv[0] < 1 || iv[0] > 7)
		return -EINVAL;

	return 0;
}
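/*
 * For rfc4309(ccm(aes)) the 11-byte CCM nonce is the 3-byte salt taken from
 * the key followed by the 8-byte per-request IV, so the length field size is
 * L = 4 and iv[0] (which holds L - 1) is set to 3 below.
 */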
static int ccm_format_packet(struct aead_request *req,
			     struct chcr_aead_ctx *aeadctx,
			     unsigned int sub_type,
			     unsigned short op_type)
{
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int rc = 0;

	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		reqctx->iv[0] = 3;
		memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
		memcpy(reqctx->iv + 4, req->iv, 8);
		memset(reqctx->iv + 12, 0, 4);
		*((unsigned short *)(reqctx->scratch_pad + 16)) =
			htons(req->assoclen - 8);
	} else {
		memcpy(reqctx->iv, req->iv, 16);
		*((unsigned short *)(reqctx->scratch_pad + 16)) =
			htons(req->assoclen);
	}
	generate_b0(req, aeadctx, op_type);
	/* zero the ctr value */
	memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
	return rc;
}
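/*
 * ccm_xtra below accounts for the extra bytes the driver feeds to the engine
 * in front of the real AAD: the 16-byte B0 block and, when AAD is present,
 * the 2-byte AAD length field.  Every offset and length programmed into the
 * SEC CPL is shifted by this amount.
 */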
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type,
				  struct chcr_context *chcrctx)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
	unsigned int ivsize = AES_BLOCK_SIZE;
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int c_id = chcrctx->dev->rx_channel_id;
	unsigned int ccm_xtra;
	unsigned char tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(assoclen + ivsize + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
					2, (ivsize ? (assoclen + 1) : 0) +
					ccm_xtra);
	sec_cpl->pldlen =
		htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
	/* For CCM there will be b0 always. So AAD start will be 1 always */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					1, assoclen + ccm_xtra, assoclen
					+ ivsize + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode,
					aeadctx->hmac_ctrl, ivsize >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
					1, dst_size);
}
int aead_ccm_validate_input(unsigned short op_type,
			    struct aead_request *req,
			    struct chcr_aead_ctx *aeadctx,
			    unsigned int sub_type)
{
	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
		if (crypto_ccm_check_iv(req->iv)) {
			pr_err("CCM: IV check fails\n");
			return -EINVAL;
		}
	} else {
		if (req->assoclen != 16 && req->assoclen != 20) {
			pr_err("RFC4309: Invalid AAD length %d\n",
			       req->assoclen);
			return -EINVAL;
		}
	}
	if (aeadctx->enckey_len == 0) {
		pr_err("CCM: Encryption key not set\n");
		return -EINVAL;
	}
	return 0;
}
unsigned int fill_aead_req_fields(struct sk_buff *skb,
				  struct aead_request *req,
				  struct scatterlist *src,
				  unsigned int ivsize,
				  struct chcr_aead_ctx *aeadctx)
{
	unsigned int frags = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	/* b0 and aad length (if available) */
	write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
			    (req->assoclen ? CCM_AAD_FIELD_SIZE : 0));
	if (req->assoclen) {
		if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
			write_sg_to_skb(skb, &frags, req->src,
					req->assoclen - 8);
		else
			write_sg_to_skb(skb, &frags, req->src, req->assoclen);
	}
	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
	write_sg_to_skb(skb, &frags, src, req->cryptlen);

	return frags;
}
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size,
					  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct phys_sge_parm sg_param;
	struct scatterlist *src;
	unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
	unsigned int dst_size = 0, kctx_len, nents;
	unsigned int sub_type;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL, src_nent;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);

	dst_size = req->assoclen + req->cryptlen + (op_type ? -authsize :
						    authsize);
	reqctx->newdstsg = NULL;
	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
		goto err;
	src_nent = sg_nents_for_len(req->src, req->assoclen + req->cryptlen);
	if (src_nent < 0)
		goto err;

	sub_type = get_aead_subtype(tfm);
	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
	if (req->src != req->dst) {
		error = chcr_copy_assoc(req, aeadctx);
		if (error) {
			pr_err("AAD copy to destination buffer fails\n");
			return ERR_PTR(error);
		}
	}
	if (dst_size && is_newsg(req->dst, &nents)) {
		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
		if (IS_ERR(reqctx->newdstsg))
			return ERR_CAST(reqctx->newdstsg);
		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
					       reqctx->newdstsg, req->assoclen);
	} else {
		if (req->src == req->dst)
			reqctx->dst = src;
		else
			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
						       req->dst, req->assoclen);
	}
	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
					     (op_type ? -authsize : authsize));
	if (reqctx->dst_nents < 0) {
		pr_err("CCM:Invalid Destination sg entries\n");
		error = -EINVAL;
		goto err;
	}
	error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
	if (error)
		goto err;

	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	if (chcr_aead_need_fallback(req, src_nent + MIN_CCM_SG,
				    T6_MAX_AAD_SIZE - 18,
				    transhdr_len + (sgl_len(src_nent + MIN_CCM_SG) * 8),
				    op_type)) {
		atomic_inc(&adap->chcr_stats.fallback);
		free_new_sg(reqctx->newdstsg);
		reqctx->newdstsg = NULL;
		return ERR_PTR(chcr_aead_fallback(req, op_type));
	}

	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	skb_reserve(skb, sizeof(struct sge_opaque_hdr));

	chcr_req = __skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
					16), aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	error = ccm_format_packet(req, aeadctx, sub_type, op_type);
	if (error)
		goto dstmap_fail;

	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
	sg_param.qid = qid;
	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
				     reqctx->dst, &sg_param);
	if (error)
		goto dstmap_fail;

	skb_set_transport_header(skb, transhdr_len);
	frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, 0, 1,
		    sizeof(struct cpl_rx_phys_dsgl) + dst_size, 0);
	reqctx->skb = skb;
	skb_get(skb);
	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	free_new_sg(reqctx->newdstsg);
	reqctx->newdstsg = NULL;
	return ERR_PTR(error);
}
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size,
				     unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct uld_ctx *u_ctx = ULD_CTX(ctx);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct phys_sge_parm sg_param;
	struct scatterlist *src;
	unsigned int frags = 0, transhdr_len;
	unsigned int ivsize = AES_BLOCK_SIZE;
	unsigned int dst_size = 0, kctx_len, nents, assoclen = req->assoclen;
	unsigned char tag_offset = 0;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL, src_nent;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(ctx->dev);

	reqctx->newdstsg = NULL;
	dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
					       authsize);
	/* validate key size */
	if (aeadctx->enckey_len == 0)
		goto err;

	if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
		goto err;
	src_nent = sg_nents_for_len(req->src, assoclen + req->cryptlen);
	if (src_nent < 0)
		goto err;

	src = scatterwalk_ffwd(reqctx->srcffwd, req->src, assoclen);
	if (req->src != req->dst) {
		error = chcr_copy_assoc(req, aeadctx);
		if (error)
			return ERR_PTR(error);
	}

	if (dst_size && is_newsg(req->dst, &nents)) {
		reqctx->newdstsg = alloc_new_sg(req->dst, nents);
		if (IS_ERR(reqctx->newdstsg))
			return ERR_CAST(reqctx->newdstsg);
		reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
					       reqctx->newdstsg, assoclen);
	} else {
		if (req->src == req->dst)
			reqctx->dst = src;
		else
			reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd,
						       req->dst, assoclen);
	}

	reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
					     (op_type ? -authsize : authsize));
	if (reqctx->dst_nents < 0) {
		pr_err("GCM:Invalid Destination sg entries\n");
		error = -EINVAL;
		goto err;
	}

	dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
	kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
		AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	if (chcr_aead_need_fallback(req, src_nent + MIN_GCM_SG,
				    T6_MAX_AAD_SIZE,
				    transhdr_len + (sgl_len(src_nent + MIN_GCM_SG) * 8),
				    op_type)) {
		atomic_inc(&adap->chcr_stats.fallback);
		free_new_sg(reqctx->newdstsg);
		reqctx->newdstsg = NULL;
		return ERR_PTR(chcr_aead_fallback(req, op_type));
	}
	skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	/* NIC driver is going to write the sge hdr. */
	skb_reserve(skb, sizeof(struct sge_opaque_hdr));

	chcr_req = __skb_put_zero(skb, transhdr_len);

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
					ctx->dev->rx_channel_id, 2, (ivsize ?
					(assoclen + 1) : 0));
	chcr_req->sec_cpl.pldlen =
		htonl(assoclen + ivsize + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + ivsize + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, assoclen + ivsize + 1,
					tag_offset, tag_offset);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
					 CHCR_ENCRYPT_OP) ? 1 : 0,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 aeadctx->hmac_ctrl, ivsize >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 1, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
				16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	/* prepare a 16 byte iv */
	/* S A L T | IV | 0x00000001 */
	if (get_aead_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(reqctx->iv, aeadctx->salt, 4);
		memcpy(reqctx->iv + 4, req->iv, 8);
	} else {
		memcpy(reqctx->iv, req->iv, 12);
	}
	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	sg_param.nents = reqctx->dst_nents;
	sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
	sg_param.qid = qid;
	error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
				     reqctx->dst, &sg_param);
	if (error)
		goto dstmap_fail;

	skb_set_transport_header(skb, transhdr_len);
	write_sg_to_skb(skb, &frags, req->src, assoclen);
	write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
	write_sg_to_skb(skb, &frags, src, req->cryptlen);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	create_wreq(ctx, chcr_req, &req->base, skb, kctx_len, size, 1,
		    sizeof(struct cpl_rx_phys_dsgl) + dst_size,
		    reqctx->verify);
	reqctx->skb = skb;
	skb_get(skb);
	return skb;

dstmap_fail:
	kfree_skb(skb);
err:
	free_new_sg(reqctx->newdstsg);
	reqctx->newdstsg = NULL;
	return ERR_PTR(error);
}
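/*
 * The AEAD request context must be large enough for both the hardware path
 * (struct chcr_aead_reqctx) and a complete sub-request for the software
 * fallback cipher allocated below, hence the max() passed to
 * crypto_aead_set_reqsize().
 */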
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				sizeof(struct aead_request) +
				crypto_aead_reqsize(aeadctx->sw_cipher)));
	aeadctx->null = crypto_get_default_null_skcipher();
	if (IS_ERR(aeadctx->null))
		return PTR_ERR(aeadctx->null);
	return chcr_device_init(ctx);
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);

	crypto_put_default_null_skcipher();
	crypto_free_aead(aeadctx->sw_cipher);
}
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* SHA1 authsize in ipsec is 12 instead of 10, i.e. maxauthsize / 2 is
	 * not true for sha1. The authsize == 12 condition should come before
	 * authsize == (maxauth >> 1).
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(aead);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) +
		((DIV_ROUND_UP(keylen, 16)) << 4) * 2;
	if (keylen == AES_KEYSIZE_128) {
		mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}
static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(aead);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(aead);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	int error;

	if (keylen < 3) {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
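/*
 * Note: the GCM key context carries, after the AES key, the GHASH hash
 * subkey H = AES-K(0^128). It is computed below with a temporary software
 * "aes-generic" cipher. For rfc4106(gcm(aes)) the last four key bytes are
 * the nonce salt and are stripped from the AES key first.
 */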
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(aead);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	struct crypto_cipher *cipher;
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		((DIV_ROUND_UP(keylen, 16)) << 4) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		aeadctx->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out1;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);

out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
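/*
 * For authenc(hmac(shaN),cbc(aes)) the driver precomputes the HMAC inner and
 * outer partial digests: the auth key is padded to the hash block size,
 * XORed with the 0x36 (ipad) and 0x5c (opad) patterns and run through one
 * hash block each; the results are kept in h_iopad and loaded into the key
 * context so the hardware can finish the HMAC per request.
 */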
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(authenc);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains auth and cipher key both */
	struct crypto_authenc_keys keys;
	unsigned int bs;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. We use authkey to generate h(ipad) and
	 * h(opad) so authkey is not needed again. authkeylen has the size of
	 * the hash digest.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
			    aeadctx->enckey_len << 3);

	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);
		shash->tfm = base_hash;
		shash->flags = crypto_shash_get_flags(base_hash);
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digest to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
							0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_context *ctx = crypto_aead_ctx(authenc);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	struct crypto_authenc_keys keys;
	int err;
	/* it contains auth and cipher key both */
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
			    aeadctx->enckey_len << 3);
	key_ctx_len = sizeof(struct _key_ctx)
		+ ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	return 0;
out:
	aeadctx->enckey_len = 0;
	return -EINVAL;
}
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_gcm_wr);
	}
}
static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
	case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_gcm_wr);
	}
}
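/*
 * Common AEAD dispatch: chcr_aead_op() checks for a crypto device and for
 * queue backpressure, asks the mode-specific builder (create_wr_fn) for a
 * work-request skb and hands it to the lower-level driver; completions
 * arrive asynchronously, so -EINPROGRESS is returned to the caller.
 */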
static int chcr_aead_op(struct aead_request *req,
			unsigned short op_type,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_context *ctx = crypto_aead_ctx(tfm);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;

	if (!ctx->dev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}
	u_ctx = ULD_CTX(ctx);
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   ctx->tx_qidx)) {
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EBUSY;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->rx_qidx], size,
			   op_type);

	if (IS_ERR(skb) || !skb)
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
static struct chcr_alg_template driver_algs[] = {
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "cbc(aes)",
			.cra_driver_name	= "cbc-aes-chcr",
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_init		= chcr_cra_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_BLOCK_SIZE,
				.setkey		= chcr_aes_cbc_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "xts(aes)",
			.cra_driver_name	= "xts-aes-chcr",
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_init		= chcr_cra_init,
			.cra_u.ablkcipher	= {
				.min_keysize	= 2 * AES_MIN_KEY_SIZE,
				.max_keysize	= 2 * AES_MAX_KEY_SIZE,
				.ivsize		= AES_BLOCK_SIZE,
				.setkey		= chcr_aes_xts_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "ctr(aes)",
			.cra_driver_name	= "ctr-aes-chcr",
			.cra_blocksize		= 1,
			.cra_init		= chcr_cra_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_BLOCK_SIZE,
				.setkey		= chcr_aes_ctr_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name		= "rfc3686(ctr(aes))",
			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
			.cra_blocksize		= 1,
			.cra_init		= chcr_rfc3686_init,
			.cra_exit		= chcr_cra_exit,
			.cra_u.ablkcipher	= {
				.min_keysize	= AES_MIN_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.ivsize		= CTR_RFC3686_IV_SIZE,
				.setkey		= chcr_aes_rfc3686_setkey,
				.encrypt	= chcr_aes_encrypt,
				.decrypt	= chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* Add AEAD Algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = 12,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize	= 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize	= AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize =	sizeof(struct chcr_context) +
						sizeof(struct chcr_aead_ctx) +
						sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};
/*
 *	chcr_unregister_alg - Deregister crypto algorithms with
 *	the kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
/*
 *	chcr_register_alg - Register crypto algorithms with kernel framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;
			a_hash->halg.base.cra_type = &crypto_ahash_type;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		}
		driver_algs[i].is_registered = 1;
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}
/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once when the first device comes up. After this
 *	the kernel will start calling driver APIs for crypto operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
 *	This should be called once when the last device goes down. After this
 *	the kernel will not call the driver API for crypto operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}