/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/bitops.h>

#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/sha3.h>
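
/*
 * Driver-local definitions: iproc_priv, the SPU message buffer layout, and the
 * flow_log()/packet_log() helpers used throughout this file. The header names
 * follow the upstream drivers/crypto/bcm layout; adjust if the local tree
 * differs.
 */
#include "util.h"
#include "cipher.h"
#include "spu.h"
#include "spum.h"
#include "spu2.h"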
/* ================= Device Structure ================== */

struct device_private iproc_priv;
/* ==================== Parameters ===================== */

int flow_debug_logging;
module_param(flow_debug_logging, int, 0644);
MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");

int packet_debug_logging;
module_param(packet_debug_logging, int, 0644);
MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");

int debug_logging_sleep;
module_param(debug_logging_sleep, int, 0644);
MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");
/*
 * The value of these module parameters is used to set the priority for each
 * algo type when this driver registers algos with the kernel crypto API.
 * To use a priority other than the default, set the priority in the insmod or
 * modprobe. Changing the module priority after init time has no effect.
 *
 * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
 * algos, but more preferred than generic software algos.
 */
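
/*
 * Example of overriding the defaults at load time (the module name
 * bcm_crypto_spu is assumed from the upstream Makefile; adjust to the local
 * build):
 *
 *   modprobe bcm_crypto_spu cipher_pri=350 hash_pri=300 aead_pri=350
 */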
static int cipher_pri = 150;
module_param(cipher_pri, int, 0644);
MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");

static int hash_pri = 100;
module_param(hash_pri, int, 0644);
MODULE_PARM_DESC(hash_pri, "Priority for hash algos");

static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
/*
 * A type 3 BCM header, expected to precede the SPU header for SPU-M.
 * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
 */
char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
/*
 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
 * is set dynamically after reading SPU type from device tree.
 */
#define BCM_HDR_LEN  iproc_priv.bcm_hdr_len
/* min and max time to sleep before retrying when mbox queue is full. usec */
#define MBOX_SLEEP_MIN  800
#define MBOX_SLEEP_MAX 1000
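
/*
 * These bounds are handed to usleep_range() in mailbox_send_message() when
 * mbox_send_message() reports -ENOBUFS, pacing up to SPU_MB_RETRY_MAX retries.
 */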
/**
 * select_channel() - Select a SPU channel to handle a crypto request. Selects
 * channel in round robin order.
 *
 * Return: channel index
 */
static u8 select_channel(void)
{
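	/*
	 * next_chan is an atomic counter, so requests arriving concurrently on
	 * different crypto API threads can each claim a channel without
	 * locking; the modulo below folds the counter onto the channels that
	 * were actually set up.
	 */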
	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);

	return chan_idx % iproc_priv.spu.num_chan;
}
/**
 * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an ablkcipher request. Includes buffers to
 * catch SPU message headers and the response data.
 * @mssg:	  mailbox message containing the receive sg
 * @rctx:	  crypto request context
 * @rx_frag_num:  number of scatterlist elements required to hold the
 *		  SPU response message
 * @chunksize:	  Number of bytes of response data expected
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		  a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 */
static int
spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 rx_frag_num,
			    unsigned int chunksize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
			   SPU_XTS_TWEAK_SIZE);

	/* Copy in each dst sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
				 rctx->dst_nents, chunksize);
	if (datalen < chunksize) {
		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
		       __func__, chunksize, datalen);
		return -EFAULT;
	}

	if (ctx->cipher.alg == CIPHER_ALG_RC4)
		/* Add buffer to catch 260-byte SUPDT field for RC4 */
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}
/**
 * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
 * send a SPU request message for an ablkcipher request. Includes SPU message
 * headers and the request data.
 * @mssg:	 mailbox message containing the transmit sg
 * @rctx:	 crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		 SPU request message
 * @chunksize:	 Number of bytes of request data
 * @pad_len:	 Number of pad bytes
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 */
static int
spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (unlikely(!mssg->spu.src))
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + ctx->spu_req_hdr_len);

	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);

	/* Copy in each src sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
				 rctx->src_nents, chunksize);
	if (unlikely(datalen < chunksize)) {
		pr_err("%s(): failed to copy src sg to mbox msg",
		       __func__);
		return -EFAULT;
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}
static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
				u8 chan_idx)
{
	int err;
	int retry_cnt = 0;
	struct device *dev = &(iproc_priv.pdev->dev);

	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
		return err;
	}

	/* Check error returned by mailbox controller */
	err = mssg->error;
	if (unlikely(err < 0)) {
		dev_err(dev, "message error %d", err);
		/* Signal txdone for mailbox channel */
	}

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
	return err;
}
/**
 * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
 * a single SPU request message, starting at the current position in the
 * request data.
 * @rctx:	Crypto request context
 *
 * This may be called on the crypto API thread, or, when a request is so large
 * it must be broken into multiple SPU messages, on the thread used to invoke
 * the response callback. When requests are broken into multiple SPU
 * messages, we assume subsequent messages depend on previous results, and
 * thus always wait for previous results before submitting the next message.
 * Because requests are submitted in lock step like this, there is no need
 * to synchronize access to request data structures.
 *
 * Return: -EINPROGRESS: request has been accepted and result will be returned
 *			 asynchronously
 *	   Any other value indicates an error
 */
static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ablkcipher_request *req =
	    container_of(areq, struct ablkcipher_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	struct spu_cipher_parms cipher_parms;
	int err = 0;
	unsigned int chunksize = 0;	/* Num bytes of request to submit */
	int remaining = 0;	/* Bytes of request still to process */
	int chunk_start;	/* Beginning of data for current SPU msg */

	/* IV or ctr value to use in this SPU msg */
	u8 local_iv_ctr[MAX_IV_SIZE];
	u32 stat_pad_len;	/* num bytes to align status field */
	u32 pad_len;		/* total length of all padding */
	bool update_key = false;
	struct brcm_message *mssg;	/* mailbox message */

	/* number of entries in src and dst sg in mailbox message. */
	u8 rx_frag_num = 2;	/* response header and STATUS */
	u8 tx_frag_num = 1;	/* request header */

	flow_log("%s\n", __func__);
	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.iv_buf = local_iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;
	remaining = rctx->total_todo - chunk_start;

	/* determine the chunk we are breaking off and update the indexes */
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (remaining > ctx->max_payload))
		chunksize = ctx->max_payload;
	else
		chunksize = remaining;

	rctx->src_sent += chunksize;
	rctx->total_sent = rctx->src_sent;
	/* Count number of sg entries to be included in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);

	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
	    rctx->is_encrypt && chunk_start)
		/*
		 * Encrypting non-first chunk. Copy last block of
		 * previous result to IV for this chunk.
		 */
		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
				    rctx->iv_ctr_len,
				    chunk_start - rctx->iv_ctr_len);

	if (rctx->iv_ctr_len) {
		/* get our local copy of the iv */
		__builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
				 rctx->iv_ctr_len);

		/* generate the next IV if possible */
		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
		    !rctx->is_encrypt) {
			/*
			 * CBC Decrypt: next IV is the last ciphertext block in
			 * this chunk
			 */
			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
					    rctx->iv_ctr_len,
					    rctx->src_sent - rctx->iv_ctr_len);
		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
			/*
			 * The SPU hardware increments the counter once for
			 * each AES block of 16 bytes. So update the counter
			 * for the next chunk, if there is one. Note that for
			 * this chunk, the counter has already been copied to
			 * local_iv_ctr. We can assume a block size of 16,
			 * because we only support CTR mode for AES, not for
			 * any other cipher alg.
			 */
			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
		}
	}

	if (ctx->cipher.alg == CIPHER_ALG_RC4) {
		rx_frag_num++;
		if (chunk_start) {
			/*
			 * for non-first RC4 chunks, use SUPDT from previous
			 * response as key for this chunk.
			 */
			cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
			update_key = true;
			cipher_parms.type = CIPHER_TYPE_UPDT;
		} else if (!rctx->is_encrypt) {
			/*
			 * First RC4 chunk. For decrypt, key in pre-built msg
			 * header may have been changed if encrypt required
			 * multiple chunks. So revert the key to the
			 * original value.
			 */
			update_key = true;
			cipher_parms.type = CIPHER_TYPE_INIT;
		}
	}
	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("sent:%u start:%u remains:%u size:%u\n",
		 rctx->src_sent, chunk_start, remaining, chunksize);

	/* Copy SPU header template created at setkey time */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));

	/*
	 * Pass SUPDT field as key. Key field in finish() call is only used
	 * when update_key has been set above for RC4. Will be ignored in
	 * all other cases.
	 */
	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
				   &cipher_parms, update_key, chunksize);

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
				     0, ctx->auth.alg, ctx->auth.mode,
				     rctx->total_sent, stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      ctx->spu_req_hdr_len);
	packet_log("payload:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	rx_frag_num += rctx->dst_nents;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		rx_frag_num++;	/* extra sg to insert tweak */

	err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
					  stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		tx_frag_num++;	/* extra sg to insert tweak */

	err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
					  pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}
/**
 * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
 * total received count for the request and updates global stats.
 * @rctx:	Crypto request context
 */
static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ablkcipher_request *req = ablkcipher_request_cast(areq);
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;

	/* See how much data was returned */
	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);

	/*
	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
	 * encrypted tweak ("i") value; we don't count those.
	 */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload() &&
	    (payload_len >= SPU_XTS_TWEAK_SIZE))
		payload_len -= SPU_XTS_TWEAK_SIZE;

	atomic64_add(payload_len, &iproc_priv.bytes_in);

	flow_log("%s() offset: %u, bd_len: %u BD:\n",
		 __func__, rctx->total_received, payload_len);

	dump_sg(req->dst, rctx->total_received, payload_len);
	if (ctx->cipher.alg == CIPHER_ALG_RC4)
		packet_dump("  supdt ", rctx->msg_buf.c.supdt_tweak,
			    SPU_SUPDT_LEN);

	rctx->total_received += payload_len;
	if (rctx->total_received == rctx->total_todo) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
		atomic_inc(
		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
	}
}
/**
 * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an ahash request.
 * @mssg:	  mailbox message containing the receive sg
 * @rctx:	  crypto request context
 * @rx_frag_num:  number of scatterlist elements required to hold the
 *		  SPU response message
 * @digestsize:   length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		  a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 */
static int
spu_ahash_rx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 rx_frag_num, unsigned int digestsize,
		       u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* Space for digest */
	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
	return 0;
}
/**
 * spu_ahash_tx_sg_create() - Build up the scatterlist of buffers used to send
 * a SPU request message for an ahash request. Includes SPU message headers and
 * the request data.
 * @mssg:	    mailbox message containing the transmit sg
 * @rctx:	    crypto request context
 * @tx_frag_num:    number of scatterlist elements required to construct the
 *		    SPU request message
 * @spu_hdr_len:    length in bytes of SPU message header
 * @hash_carry_len: Number of bytes of data carried over from previous req
 * @new_data_len:   Number of bytes of new request data
 * @pad_len:	    Number of pad bytes
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 */
static int
spu_ahash_tx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 tx_frag_num,
		       u32 spu_hdr_len,
		       unsigned int hash_carry_len,
		       unsigned int new_data_len, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	u32 datalen;		/* Number of bytes of response data expected */
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.src)
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + spu_hdr_len);

	if (hash_carry_len)
		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);

	if (new_data_len) {
		/* Copy in each src sg entry from request, up to chunksize */
		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
					 rctx->src_nents, new_data_len);
		if (datalen < new_data_len) {
			pr_err("%s(): failed to copy src sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}
/**
 * handle_ahash_req() - Process an asynchronous hash request from the crypto
 * API.
 * @rctx:  Crypto request context
 *
 * Builds a SPU request message embedded in a mailbox message and submits the
 * mailbox message on a selected mailbox channel. The SPU request message is
 * constructed as a scatterlist, including entries from the crypto API's
 * src scatterlist to avoid copying the data to be hashed. This function is
 * called either on the thread from the crypto API, or, in the case that the
 * crypto API request is too large to fit in a single SPU request message,
 * on the thread that invokes the receive callback with a response message.
 * Because some operations require the response from one chunk before the next
 * chunk can be submitted, we always wait for the response for the previous
 * chunk before submitting the next chunk. Because requests are submitted in
 * lock step like this, there is no need to synchronize access to request data
 * structures.
 *
 * Return:
 *   -EINPROGRESS: request has been submitted to SPU and response will be
 *		   returned asynchronously
 *   -EAGAIN:      non-final request included a small amount of data, which for
 *		   efficiency we did not submit to the SPU, but instead stored
 *		   to be submitted to the SPU with the next part of the request
 *   other:        an error code
 */
static int handle_ahash_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
	struct iproc_ctx_s *ctx = rctx->ctx;

	/* number of bytes still to be hashed in this req */
	unsigned int nbytes_to_hash = 0;
	int err = 0;
	unsigned int chunksize = 0;	/* length of hash carry + new data */
	/*
	 * length of new data, not from hash carry, to be submitted in
	 * this hw request
	 */
	unsigned int new_data_len;

	unsigned int chunk_start = 0;
	u32 db_size;	 /* Length of data field, incl gcm and hash padding */
	int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	unsigned int local_nbuf;
	u32 spu_hdr_len;
	unsigned int digestsize;
	u16 rem = 0;

	/*
	 * number of entries in src and dst sg. Always includes SPU msg header.
	 * rx always includes a buffer to catch digest and STATUS.
	 */
	u8 rx_frag_num = 3;
	u8 tx_frag_num = 1;

	flow_log("total_todo %u, total_sent %u\n",
		 rctx->total_todo, rctx->total_sent);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&cipher_parms, 0, sizeof(cipher_parms));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.bd_suppress = true;
	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;

	/*
	 * For hash algorithms the assignment below looks a bit odd, but it's
	 * needed for the AES-XCBC and AES-CMAC hash algorithms to
	 * differentiate between 128, 192, 256 bit key values.
	 * Based on the key values, hash algorithm is selected.
	 * For example for 128 bit key, hash algorithm is AES-128.
	 */
	cipher_parms.type = ctx->cipher_type;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;
	/*
	 * Compute the amount remaining to hash. This may include data
	 * carried over from previous requests.
	 */
	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
	chunksize = nbytes_to_hash;
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (chunksize > ctx->max_payload))
		chunksize = ctx->max_payload;

	/*
	 * If this is not a final request and the request data is not a
	 * multiple of a full block, then simply park the extra data and prefix
	 * it to the data for the next request.
	 */
	if (!rctx->is_final) {
		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
		u16 new_len;	/* len of data to add to hash carry */

		rem = chunksize % blocksize;	/* remainder */
		if (rem) {
			/* chunksize not a multiple of blocksize */
			chunksize -= rem;
			if (chunksize == 0) {
				/* Don't have a full block to submit to hw */
				new_len = rem - rctx->hash_carry_len;
				sg_copy_part_to_buf(req->src, dest, new_len,
						    rctx->src_sent);
				rctx->hash_carry_len = rem;
				flow_log("Exiting with hash carry len: %u\n",
					 rctx->hash_carry_len);
				packet_dump("  buf: ",
					    rctx->hash_carry,
					    rctx->hash_carry_len);
				return -EAGAIN;
			}
		}
	}
	/* if we have hash carry, then prefix it to the data in this request */
	local_nbuf = rctx->hash_carry_len;
	rctx->hash_carry_len = 0;
	if (local_nbuf)
		tx_frag_num++;
	new_data_len = chunksize - local_nbuf;

	/* Count number of sg entries to be used in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
				       new_data_len);

	/* AES hashing keeps key size in type field, so need to copy it here */
	if (hash_parms.alg == HASH_ALG_AES)
		hash_parms.type = cipher_parms.type;
	else
		hash_parms.type = spu->spu_hash_type(rctx->total_sent);

	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
					  hash_parms.type);
	hash_parms.digestsize =	digestsize;

	/* update the indexes */
	rctx->total_sent += chunksize;
	/* if you sent a prebuf then that wasn't from this req->src */
	rctx->src_sent += new_data_len;

	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
							   hash_parms.mode,
							   chunksize,
							   blocksize);
	/*
	 * If a non-first chunk, then include the digest returned from the
	 * previous chunk so that hw can add to it (except for AES types).
	 */
	if ((hash_parms.type == HASH_TYPE_UPDT) &&
	    (hash_parms.alg != HASH_ALG_AES)) {
		hash_parms.key_buf = rctx->incr_hash;
		hash_parms.key_len = digestsize;
	}

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s() final: %u nbuf: %u ",
		 __func__, rctx->is_final, local_nbuf);

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);

	/* Prepend SPU header with type 3 BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	hash_parms.prebuf_len = local_nbuf;
	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN,
					      &req_opts, &cipher_parms,
					      &hash_parms, &aead_parms,
					      new_data_len);
== 0) {
873 pr_err("Failed to create SPU request header\n");
878 * Determine total length of padding required. Put all padding in one
881 data_pad_len
= spu
->spu_gcm_ccm_pad_len(ctx
->cipher
.mode
, chunksize
);
882 db_size
= spu_real_db_size(0, 0, local_nbuf
, new_data_len
,
883 0, 0, hash_parms
.pad_len
);
884 if (spu
->spu_tx_status_len())
885 stat_pad_len
= spu
->spu_wordalign_padlen(db_size
);
888 pad_len
= hash_parms
.pad_len
+ data_pad_len
+ stat_pad_len
;
891 spu
->spu_request_pad(rctx
->msg_buf
.spu_req_pad
, data_pad_len
,
892 hash_parms
.pad_len
, ctx
->auth
.alg
,
893 ctx
->auth
.mode
, rctx
->total_sent
,
897 spu
->spu_dump_msg_hdr(rctx
->msg_buf
.bcm_spu_req_hdr
+ BCM_HDR_LEN
,
899 packet_dump(" prebuf: ", rctx
->hash_carry
, local_nbuf
);
901 dump_sg(rctx
->src_sg
, rctx
->src_skip
, new_data_len
);
902 packet_dump(" pad: ", rctx
->msg_buf
.spu_req_pad
, pad_len
);
	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
				     stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				     local_nbuf, new_data_len, pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}
/**
 * spu_hmac_outer_hash() - Request synchronous software compute of the outer
 * hash for an HMAC request.
 * @req:  The HMAC request from the crypto API
 * @ctx:  The session context
 *
 * Return: 0 if synchronous hash operation successful
 *	   -EINVAL if the hash algo is unrecognized
 *	   any other value indicates an error
 */
static int spu_hmac_outer_hash(struct ahash_request *req,
			       struct iproc_ctx_s *ctx)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	int rc;

	switch (ctx->auth.alg) {
	case HASH_ALG_MD5:
		rc = do_shash("md5", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA1:
		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA224:
		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA256:
		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA384:
		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA512:
		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	default:
		pr_err("%s() Error : unknown hmac type\n", __func__);
		rc = -EINVAL;
	}
	return rc;
}
/**
 * ahash_req_done() - Process a hash result from the SPU hardware.
 * @rctx: Crypto request context
 *
 * Return: 0 if successful
 *	   < 0 if an error
 */
static int ahash_req_done(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;

	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);

	if (spu->spu_type == SPU_TYPE_SPUM) {
		/* byte swap the output from the UPDT function to network byte
		 * order
		 */
		if (ctx->auth.alg == HASH_ALG_MD5) {
			__swab32s((u32 *)req->result);
			__swab32s(((u32 *)req->result) + 1);
			__swab32s(((u32 *)req->result) + 2);
			__swab32s(((u32 *)req->result) + 3);
			__swab32s(((u32 *)req->result) + 4);
		}
	}

	flow_dump("  digest ", req->result, ctx->digestsize);

	/* if this an HMAC then do the outer hash */
	if (rctx->is_sw_hmac) {
		err = spu_hmac_outer_hash(req, ctx);
		if (err < 0)
			return err;
		flow_dump("  hmac: ", req->result, ctx->digestsize);
	}

	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
	} else {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
	}

	return 0;
}
/**
 * handle_ahash_resp() - Process a SPU response message for a hash request.
 * Checks if the entire crypto API request has been processed, and if so,
 * invokes post processing on the result.
 * @rctx: Crypto request context
 */
static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
{
	struct iproc_ctx_s *ctx = rctx->ctx;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	/*
	 * Save hash to use as input to next op if incremental. Might be copying
	 * too much, but that's easier than figuring out actual digest size here
	 */
	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);

	flow_log("%s() blocksize:%u digestsize:%u\n",
		 __func__, blocksize, ctx->digestsize);

	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);

	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
		ahash_req_done(rctx);
}
/**
 * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
 * a SPU response message for an AEAD request. Includes buffers to catch SPU
 * message headers and the response data.
 * @mssg:	  mailbox message containing the receive sg
 * @req:	  Crypto API request
 * @rctx:	  crypto request context
 * @rx_frag_num:  number of scatterlist elements required to hold the
 *		  SPU response message
 * @assoc_len:	  Length of associated data included in the crypto request
 * @ret_iv_len:   Length of IV returned in response
 * @resp_len:	  Number of bytes of response data expected to be written to
 *		  dst buffer from crypto API
 * @digestsize:   Length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		  a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 */
static int spu_aead_rx_sg_create(struct brcm_message *mssg,
				 struct aead_request *req,
				 struct iproc_reqctx_s *rctx,
				 u8 rx_frag_num,
				 unsigned int assoc_len,
				 u32 ret_iv_len, unsigned int resp_len,
				 unsigned int digestsize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */
	u32 assoc_buf_len;
	u8 data_padlen = 0;

	if (ctx->is_rfc4543) {
		/* RFC4543: only pad after data, not after AAD */
		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						       assoc_len + resp_len);
		assoc_buf_len = assoc_len;
	} else {
		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						       resp_len);
		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
							assoc_len, ret_iv_len,
							rctx->is_encrypt);
	}

	if (ctx->cipher.mode == CIPHER_MODE_CCM)
		/* ICV (after data) must be in the next 32-bit word for CCM */
		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
							 resp_len +
							 data_padlen);

	if (data_padlen)
		/* have to catch gcm pad in separate buffer */
		rx_frag_num++;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);

	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	if (assoc_buf_len) {
		/*
		 * Don't write directly to req->dst, because SPU may pad the
		 * assoc data in the response
		 */
		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
	}

	if (resp_len) {
		/*
		 * Copy in each dst sg entry from request, up to chunksize.
		 * dst sg catches just the data. digest caught in separate buf.
		 */
		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
					 rctx->dst_nents, resp_len);
		if (datalen < (resp_len)) {
			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
			       __func__, resp_len, datalen);
			return -EFAULT;
		}
	}

	/* If GCM/CCM data is padded, catch padding in separate buffer */
	if (data_padlen) {
		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
	}

	/* Always catch ICV in separate buffer */
	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	flow_log("stat_pad_len %u\n", stat_pad_len);
	if (stat_pad_len) {
		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
	}

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}
/**
 * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
 * SPU request message for an AEAD request. Includes SPU message headers and the
 * request data.
 * @mssg:	  mailbox message containing the transmit sg
 * @rctx:	  crypto request context
 * @tx_frag_num:  number of scatterlist elements required to construct the
 *		  SPU request message
 * @spu_hdr_len:  length of SPU message header in bytes
 * @assoc:	  crypto API associated data scatterlist
 * @assoc_len:	  length of associated data
 * @assoc_nents:  number of scatterlist entries containing assoc data
 * @aead_iv_len:  length of AEAD IV, if included
 * @chunksize:	  Number of bytes of request data
 * @aad_pad_len:  Number of bytes of padding at end of AAD. For GCM/CCM.
 * @pad_len:	  Number of pad bytes
 * @incl_icv:	  If true, write separate ICV buffer after data and
 *		  any padding
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 */
static int spu_aead_tx_sg_create(struct brcm_message *mssg,
				 struct iproc_reqctx_s *rctx,
				 u8 tx_frag_num,
				 u32 spu_hdr_len,
				 struct scatterlist *assoc,
				 unsigned int assoc_len,
				 int assoc_nents,
				 unsigned int aead_iv_len,
				 unsigned int chunksize,
				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct scatterlist *assoc_sg = assoc;
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of data to write */
	u32 written;		/* Number of bytes of data written */
	u32 assoc_offset = 0;
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.src)
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + spu_hdr_len);

	if (assoc_len) {
		/* Copy in each associated data sg entry from request */
		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
					 assoc_nents, assoc_len);
		if (written < assoc_len) {
			pr_err("%s(): failed to copy assoc sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (aead_iv_len)
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);

	if (aad_pad_len) {
		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
	}

	datalen = chunksize;
	if ((chunksize > ctx->digestsize) && incl_icv)
		datalen -= ctx->digestsize;
	if (datalen) {
		/* For aead, a single msg should consume the entire src sg */
		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
					 rctx->src_nents, datalen);
		if (written < datalen) {
			pr_err("%s(): failed to copy src sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (pad_len) {
		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
	}

	if (incl_icv)
		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}
/**
 * handle_aead_req() - Submit a SPU request message for the next chunk of the
 * current AEAD request.
 * @rctx:  Crypto request context
 *
 * Unlike other operation types, we assume the length of the request fits in
 * a single SPU request message. aead_enqueue() makes sure this is true.
 * Comments for other op types regarding threads applies here as well.
 *
 * Unlike incremental hash ops, where the spu returns the entire hash for
 * truncated algs like sha-224, the SPU returns just the truncated hash in
 * response to aead requests. So digestsize is always ctx->digestsize here.
 *
 * Return: -EINPROGRESS: crypto request has been accepted and result will be
 *			 returned asynchronously
 *	   Any other value indicates an error
 */
static int handle_aead_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;
	unsigned int chunksize;
	unsigned int resp_len;
	u32 spu_hdr_len;
	u32 db_size;
	u32 stat_pad_len;
	u32 pad_len;
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	int assoc_nents = 0;
	bool incl_icv = false;
	unsigned int digestsize = ctx->digestsize;

	/* number of entries in src and dst sg. Always includes SPU msg header.
	 */
	u8 rx_frag_num = 2;	/* and STATUS */
	u8 tx_frag_num = 1;

	/* doing the whole thing at once */
	chunksize = rctx->total_todo;

	flow_log("%s: chunksize %u\n", __func__, chunksize);
	memset(&req_opts, 0, sizeof(req_opts));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.is_inbound = !(rctx->is_encrypt);
	req_opts.auth_first = ctx->auth_first;
	req_opts.is_aead = true;
	req_opts.is_esp = ctx->is_esp;

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;
	hash_parms.digestsize = digestsize;

	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
		hash_parms.key_len = SHA224_DIGEST_SIZE;
	aead_parms.assoc_size = req->assoclen;
	if (ctx->is_esp && !ctx->is_rfc4543) {
		/*
		 * 8-byte IV is included as assoc data in the request. SPU2
		 * expects AAD to include just SPI and seqno. So
		 * subtract off the IV len.
		 */
		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;

		if (rctx->is_encrypt) {
			aead_parms.return_iv = true;
			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
		}
	} else {
		aead_parms.ret_iv_len = 0;
	}

	/*
	 * Count number of sg entries from the crypto API request that are to
	 * be included in this mailbox message. For dst sg, don't count space
	 * for digest. Digest gets caught in a separate buffer and copied back
	 * to dst sg when processing response.
	 */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
	if (aead_parms.assoc_size)
		assoc_nents = spu_sg_count(rctx->assoc, 0,
					   aead_parms.assoc_size);

	mssg = &rctx->mb_mssg;

	rctx->total_sent = chunksize;
	rctx->src_sent = chunksize;
	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
				    aead_parms.assoc_size,
				    aead_parms.ret_iv_len,
				    rctx->is_encrypt))
		rx_frag_num++;

	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
						rctx->iv_ctr_len);

	if (ctx->auth.alg == HASH_ALG_AES)
		hash_parms.type = ctx->cipher_type;
	/* General case AAD padding (CCM and RFC4543 special cases below) */
	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						  aead_parms.assoc_size);

	/* General case data padding (CCM decrypt special case below) */
	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							   chunksize);

	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
		/*
		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
		 * 128-bit aligned
		 */
		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
					 ctx->cipher.mode,
					 aead_parms.assoc_size + 2);

		/*
		 * And when decrypting CCM, need to pad without including
		 * size of ICV which is tacked on to end of chunk
		 */
		if (!rctx->is_encrypt)
			aead_parms.data_pad_len =
				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							 chunksize - digestsize);

		/* CCM also requires software to rewrite portions of IV: */
		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
				       chunksize, rctx->is_encrypt,
				       ctx->is_esp);
	}

	if (ctx->is_rfc4543) {
		/*
		 * RFC4543: data is included in AAD, so don't pad after AAD
		 * and pad data based on both AAD + data size
		 */
		aead_parms.aad_pad_len = 0;
		if (!rctx->is_encrypt)
			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
					ctx->cipher.mode,
					aead_parms.assoc_size + chunksize -
					digestsize);
		else
			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
					ctx->cipher.mode,
					aead_parms.assoc_size + chunksize);

		req_opts.is_rfc4543 = true;
	}

	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
		incl_icv = true;
		tx_frag_num++;
		/* Copy ICV from end of src scatterlist to digest buf */
		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
				    req->assoclen + rctx->total_sent -
				    digestsize);
	}
	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);

	/* Prepend SPU header with type 3 BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN, &req_opts,
					      &cipher_parms, &hash_parms,
					      &aead_parms, chunksize);

	/* Determine total length of padding. Put all padding in one buffer. */
	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
				   chunksize, aead_parms.aad_pad_len,
				   aead_parms.data_pad_len, 0);

	stat_pad_len = spu->spu_wordalign_padlen(db_size);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = aead_parms.data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
				     aead_parms.data_pad_len, 0,
				     ctx->auth.alg, ctx->auth.mode,
				     rctx->total_sent, stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
	packet_dump("    aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
	packet_log("BD:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
	packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	rx_frag_num += rctx->dst_nents;
	resp_len = chunksize;

	/*
	 * Always catch ICV in separate buffer. Have to for GCM/CCM because of
	 * padding. Have to for SHA-224 and other truncated SHAs because SPU
	 * sends entire digest back.
	 */
	rx_frag_num++;

	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
		/*
		 * Input is ciphertxt plus ICV, but ICV is not included
		 * in output.
		 */
		resp_len -= ctx->digestsize;
		if (resp_len == 0)
			/* no rx frags to catch output data */
			rx_frag_num -= rctx->dst_nents;
	}

	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
				    aead_parms.assoc_size,
				    aead_parms.ret_iv_len, resp_len, digestsize,
				    stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	tx_frag_num += assoc_nents;
	if (aead_parms.aad_pad_len)
		tx_frag_num++;
	if (aead_parms.iv_len)
		tx_frag_num++;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				    rctx->assoc, aead_parms.assoc_size,
				    assoc_nents, aead_parms.iv_len, chunksize,
				    aead_parms.aad_pad_len, pad_len, incl_icv);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}
/**
 * handle_aead_resp() - Process a SPU response message for an AEAD request.
 * @rctx:  Crypto request context
 */
static void handle_aead_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;
	unsigned int icv_offset;
	u32 result_len;

	/* See how much data was returned */
	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
	flow_log("payload_len %u\n", payload_len);

	/* only count payload */
	atomic64_add(payload_len, &iproc_priv.bytes_in);

	if (req->assoclen)
		packet_dump("  assoc_data ", rctx->msg_buf.a.resp_aad,
			    req->assoclen);

	/*
	 * Copy the ICV back to the destination
	 * buffer. In decrypt case, SPU gives us back the digest, but crypto
	 * API doesn't expect ICV in dst buffer.
	 */
	result_len = req->cryptlen;
	if (rctx->is_encrypt) {
		icv_offset = req->assoclen + rctx->total_sent;
		packet_dump("  ICV: ", rctx->msg_buf.digest, ctx->digestsize);
		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
				      ctx->digestsize, icv_offset);
		result_len += ctx->digestsize;
	}

	packet_log("response data:  ");
	dump_sg(req->dst, req->assoclen, result_len);

	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
	if (ctx->cipher.alg == CIPHER_ALG_AES) {
		if (ctx->cipher.mode == CIPHER_MODE_CCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
		else
			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	} else {
		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	}
}
/**
 * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
 * @rctx: request context
 *
 * Mailbox scatterlists are allocated for each chunk. So free them after
 * processing each chunk.
 */
static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
{
	/* mailbox message used to tx request */
	struct brcm_message *mssg = &rctx->mb_mssg;

	kfree(mssg->spu.src);
	kfree(mssg->spu.dst);
	memset(mssg, 0, sizeof(struct brcm_message));
}
/**
 * finish_req() - Used to invoke the complete callback from the requester when
 * a request has been handled asynchronously.
 * @rctx:  Request context
 * @err:   Indicates whether the request was successful or not
 *
 * Ensures that cleanup has been done for request
 */
static void finish_req(struct iproc_reqctx_s *rctx, int err)
{
	struct crypto_async_request *areq = rctx->parent;

	flow_log("%s() err:%d\n\n", __func__, err);

	/* No harm done if already called */
	spu_chunk_cleanup(rctx);

	if (areq)
		areq->complete(areq, err);
}
/**
 * spu_rx_callback() - Callback from mailbox framework with a SPU response.
 * @cl:		mailbox client structure for SPU driver
 * @msg:	mailbox message containing SPU response
 */
static void spu_rx_callback(struct mbox_client *cl, void *msg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct brcm_message *mssg = msg;
	struct iproc_reqctx_s *rctx;
	struct iproc_ctx_s *ctx;
	struct crypto_async_request *areq;
	int err = 0;

	rctx = mssg->ctx;
	if (unlikely(!rctx)) {
		/* This is fatal */
		pr_err("%s(): no request context", __func__);
		err = -EFAULT;
		goto cb_finish;
	}
	areq = rctx->parent;
	ctx = rctx->ctx;

	/* process the SPU status */
	err = spu->spu_status_process(rctx->msg_buf.rx_stat);
	if (err != 0) {
		if (err == SPU_INVALID_ICV)
			atomic_inc(&iproc_priv.bad_icv);
		err = -EBADMSG;
		goto cb_finish;
	}

	/* Process the SPU response message */
	switch (rctx->ctx->alg->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		handle_ablkcipher_resp(rctx);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		handle_ahash_resp(rctx);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		handle_aead_resp(rctx);
		break;
	default:
		err = -EINVAL;
		goto cb_finish;
	}

	/*
	 * If this response does not complete the request, then send the next
	 * request chunk.
	 */
	if (rctx->total_sent < rctx->total_todo) {
		/* Deallocate anything specific to previous chunk */
		spu_chunk_cleanup(rctx);

		switch (rctx->ctx->alg->type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = handle_ablkcipher_req(rctx);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = handle_ahash_req(rctx);
			if (err == -EAGAIN)
				/*
				 * we saved data in hash carry, but tell crypto
				 * API we successfully completed request.
				 */
				err = 0;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = handle_aead_req(rctx);
			break;
		default:
			err = -EINVAL;
		}

		if (err == -EINPROGRESS)
			/* Successfully submitted request for next chunk */
			return;
	}

cb_finish:
	finish_req(rctx, err);
}
/* ==================== Kernel Cryptographic API ==================== */
/**
 * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request.
 * @req:	Crypto API request
 * @encrypt:	true if encrypting; false if decrypting
 *
 * Return: -EINPROGRESS if request accepted and result will be returned
 *			asynchronously
 *	   < 0 if an error
 */
static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
{
	struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
	struct iproc_ctx_s *ctx =
	    crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	int err;

	flow_log("%s() enc:%u\n", __func__, encrypt);

	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	rctx->parent = &req->base;
	rctx->is_encrypt = encrypt;
	rctx->bd_suppress = false;
	rctx->total_todo = req->nbytes;
	rctx->src_sent = 0;
	rctx->total_sent = 0;
	rctx->total_received = 0;
	rctx->ctx = ctx;

	/* Initialize current position in src and dst scatterlists */
	rctx->src_sg = req->src;
	rctx->src_nents = 0;
	rctx->src_skip = 0;
	rctx->dst_sg = req->dst;
	rctx->dst_nents = 0;
	rctx->dst_skip = 0;
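
	/*
	 * For modes that take an IV or counter, the crypto API passes it in
	 * req->info. Stash a copy in the message buffer so it can be updated
	 * and chained across SPU chunks without touching the caller's buffer.
	 */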
	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
	    ctx->cipher.mode == CIPHER_MODE_CTR ||
	    ctx->cipher.mode == CIPHER_MODE_OFB ||
	    ctx->cipher.mode == CIPHER_MODE_XTS ||
	    ctx->cipher.mode == CIPHER_MODE_GCM ||
	    ctx->cipher.mode == CIPHER_MODE_CCM) {
		rctx->iv_ctr_len =
		    crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
		memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
	} else {
		rctx->iv_ctr_len = 0;
	}

	/* Choose a SPU to process this request */
	rctx->chan_idx = select_channel();
	err = handle_ablkcipher_req(rctx);
	if (err != -EINPROGRESS)
		/* synchronous result */
		spu_chunk_cleanup(rctx);

	return err;
}
static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
		      unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
	u32 tmp[DES_EXPKEY_WORDS];

	if (keylen == DES_KEY_SIZE) {
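		/*
		 * des_ekey() returns 0 for one of the known weak DES keys;
		 * only reject it when the caller requested weak-key checking
		 * via CRYPTO_TFM_REQ_WEAK_KEY.
		 */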
		if (des_ekey(tmp, key) == 0) {
			if (crypto_ablkcipher_get_flags(cipher) &
			    CRYPTO_TFM_REQ_WEAK_KEY) {
				u32 flags = CRYPTO_TFM_RES_WEAK_KEY;

				crypto_ablkcipher_set_flags(cipher, flags);
				return -EINVAL;
			}
		}

		ctx->cipher_type = CIPHER_TYPE_DES;
	} else {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return 0;
}
static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);

	if (keylen == (DES_KEY_SIZE * 3)) {
		const u32 *K = (const u32 *)key;
		u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;
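
		/*
		 * Reject keys where K1 == K2 or K2 == K3; such 3DES keys
		 * collapse to single-DES strength.
		 */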
		if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
		    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
			crypto_ablkcipher_set_flags(cipher, flags);
			return -EINVAL;
		}

		ctx->cipher_type = CIPHER_TYPE_3DES;
	} else {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return 0;
}
static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
		      unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);

	if (ctx->cipher.mode == CIPHER_MODE_XTS)
		/* XTS includes two keys of equal length */
		keylen = keylen / 2;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->cipher_type = CIPHER_TYPE_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->cipher_type = CIPHER_TYPE_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->cipher_type = CIPHER_TYPE_AES256;
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
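
	/*
	 * Sanity check: a finite SPU max payload should be a whole number of
	 * AES blocks, otherwise chunking could split a block.
	 */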
	WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
		((ctx->max_payload % AES_BLOCK_SIZE) != 0));
	return 0;
}
static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
		      unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
	int i;

	ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;

	ctx->enckey[0] = 0x00;	/* 0x00 */
	ctx->enckey[1] = 0x00;	/* i    */
	ctx->enckey[2] = 0x00;	/* 0x00 */
	ctx->enckey[3] = 0x00;	/* j    */
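	/*
	 * The SPU expects the 4-byte (0x00, i, 0x00, j) RC4 state header above
	 * followed by ARC4_MAX_KEY_SIZE bytes of key material, so repeat the
	 * caller's key to fill that space.
	 */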
	for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
		ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];

	ctx->cipher_type = CIPHER_TYPE_INIT;

	return 0;
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			     unsigned int keylen)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
	struct spu_cipher_parms cipher_parms;
	u32 alloc_len = 0;
	int err;

	flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
	flow_dump("  key: ", key, keylen);

	switch (ctx->cipher.alg) {
	case CIPHER_ALG_DES:
		err = des_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_3DES:
		err = threedes_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_AES:
		err = aes_setkey(cipher, key, keylen);
		break;
	case CIPHER_ALG_RC4:
		err = rc4_setkey(cipher, key, keylen);
		break;
	default:
		pr_err("%s() Error: unknown cipher alg\n", __func__);
		err = -EINVAL;
	}
	if (err)
		return err;

	/* RC4 already populated ctx->enkey */
	if (ctx->cipher.alg != CIPHER_ALG_RC4) {
		memcpy(ctx->enckey, key, keylen);
		ctx->enckeylen = keylen;
	}
	/* SPU needs XTS keys in the reverse order the crypto API presents */
	if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
	    (ctx->cipher.mode == CIPHER_MODE_XTS)) {
		unsigned int xts_keylen = keylen / 2;

		memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
		memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
	}

	if (spu->spu_type == SPU_TYPE_SPUM)
		alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
	memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
	cipher_parms.iv_buf = NULL;
	cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
	flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.key_len = ctx->enckeylen;

	/* Prepend SPU request message with BCM header */
	memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
	ctx->spu_req_hdr_len =
	    spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
				     &cipher_parms);

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
							  ctx->enckeylen,
							  false);

	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);

	return 0;
}
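
/*
 * Editor's note (illustration only): for xts(aes), the crypto API hands
 * ablkcipher_setkey() the two halves as key1 || key2, each keylen/2 bytes.
 * The memcpy pair above stores them in ctx->enckey as key2 || key1, the
 * order the SPU hardware expects, e.g. for a 64-byte XTS key:
 *
 *	API order:  [ key1 (32B) ][ key2 (32B) ]
 *	SPU order:  [ key2 (32B) ][ key1 (32B) ]
 */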
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);

	return ablkcipher_enqueue(req, true);
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);

	return ablkcipher_enqueue(req, false);
}
static int ahash_enqueue(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	int err = 0;
	const char *alg_name;

	flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);

	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	rctx->parent = &req->base;
	rctx->bd_suppress = true;
	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));

	/* Initialize position in src scatterlist */
	rctx->src_sg = req->src;
	rctx->src_nents = 0;
	rctx->dst_sg = NULL;
	rctx->dst_nents = 0;

	/* SPU2 hardware does not compute hash of zero length data */
	if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
	    (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
		flow_log("Doing %sfinal %s zero-len hash request in software\n",
			 rctx->is_final ? "" : "non-", alg_name);
		err = do_shash((unsigned char *)alg_name, req->result,
			       NULL, 0, NULL, 0, ctx->authkey,
			       ctx->authkeylen);
		if (err < 0)
			flow_log("Hash request failed with error %d\n", err);
		return err;
	}
	/* Choose a SPU to process this request */
	rctx->chan_idx = select_channel();

	err = handle_ahash_req(rctx);
	if (err != -EINPROGRESS)
		/* synchronous result */
		spu_chunk_cleanup(rctx);

	if (err == -EAGAIN)
		/*
		 * we saved data in hash carry, but tell crypto API
		 * we successfully completed request.
		 */
		err = 0;

	return err;
}
static int __ahash_init(struct ahash_request *req)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);

	flow_log("%s()\n", __func__);

	/* Initialize the context */
	rctx->hash_carry_len = 0;
	rctx->is_final = 0;

	rctx->total_todo = 0;
	rctx->total_sent = 0;
	rctx->total_received = 0;

	ctx->digestsize = crypto_ahash_digestsize(tfm);
	/* If we add a hash whose digest is larger, catch it here. */
	WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);

	rctx->is_sw_hmac = false;

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
							  true);

	return 0;
}
/**
 * spu_no_incr_hash() - Determine whether incremental hashing is supported.
 * @ctx:  Crypto session context
 *
 * SPU-2 does not support incremental hashing (we'll have to revisit and
 * condition based on chip revision or device tree entry if future versions do
 * support incremental hash)
 *
 * SPU-M also doesn't support incremental hashing of AES-XCBC
 *
 * Return: true if incremental hashing is not supported
 */
bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
{
	struct spu_hw *spu = &iproc_priv.spu;

	if (spu->spu_type == SPU_TYPE_SPU2)
		return true;

	if ((ctx->auth.alg == HASH_ALG_AES) &&
	    (ctx->auth.mode == HASH_MODE_XCBC))
		return true;

	/* Otherwise, incremental hashing is supported */
	return false;
}
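
/*
 * Editor's note (illustration only): every ahash entry point below follows
 * the same pattern around this predicate:
 *
 *	if (spu_no_incr_hash(ctx))
 *		// do the work with a synchronous shash fallback
 *	else
 *		// hand the request to the SPU via __ahash_*()
 */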
static int ahash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	const char *alg_name;
	struct crypto_shash *hash;
	int ret;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
		hash = crypto_alloc_shash(alg_name, 0, 0);
		if (IS_ERR(hash)) {
			ret = PTR_ERR(hash);
			goto err;
		}

		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		ctx->shash = kmalloc(sizeof(*ctx->shash) +
				     crypto_shash_descsize(hash), gfp);
		if (!ctx->shash) {
			ret = -ENOMEM;
			goto err_hash;
		}
		ctx->shash->tfm = hash;
		ctx->shash->flags = 0;

		/* Set the key using data we already have from setkey */
		if (ctx->authkeylen > 0) {
			ret = crypto_shash_setkey(hash, ctx->authkey,
						  ctx->authkeylen);
			if (ret)
				goto err_shash;
		}

		/* Initialize hash w/ this key and other params */
		ret = crypto_shash_init(ctx->shash);
		if (ret)
			goto err_shash;
	} else {
		/* Otherwise call the internal function which uses SPU hw */
		ret = __ahash_init(req);
	}

	return ret;

err_shash:
	kfree(ctx->shash);
err_hash:
	crypto_free_shash(hash);
err:
	return ret;
}
static int __ahash_update(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_update() nbytes:%u\n", req->nbytes);

	if (!req->nbytes)
		return 0;
	rctx->total_todo += req->nbytes;

	return ahash_enqueue(req);
}
static int ahash_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	u8 *tmpbuf;
	int ret;
	int nents;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		if (req->src)
			nents = sg_nents(req->src);
		else
			return -EINVAL;

		/* Copy data from req scatterlist to tmp buffer */
		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		tmpbuf = kmalloc(req->nbytes, gfp);
		if (!tmpbuf)
			return -ENOMEM;

		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
				req->nbytes) {
			kfree(tmpbuf);
			return -EINVAL;
		}

		/* Call synchronous update */
		ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
		kfree(tmpbuf);
	} else {
		/* Otherwise call the internal function which uses SPU hw */
		ret = __ahash_update(req);
	}

	return ret;
}
static int __ahash_final(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_final() nbytes:%u\n", req->nbytes);

	rctx->is_final = 1;

	return ahash_enqueue(req);
}
static int ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	int ret;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		ret = crypto_shash_final(ctx->shash, req->result);

		/* Done with hash, can deallocate it now */
		crypto_free_shash(ctx->shash->tfm);
		kfree(ctx->shash);
	} else {
		/* Otherwise call the internal function which uses SPU hw */
		ret = __ahash_final(req);
	}

	return ret;
}
static int __ahash_finup(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);

	flow_log("ahash_finup() nbytes:%u\n", req->nbytes);

	rctx->total_todo += req->nbytes;
	rctx->is_final = 1;

	return ahash_enqueue(req);
}
static int ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	u8 *tmpbuf;
	int ret;
	int nents;
	gfp_t gfp;

	if (spu_no_incr_hash(ctx)) {
		/*
		 * If we get an incremental hashing request and it's not
		 * supported by the hardware, we need to handle it in software
		 * by calling synchronous hash functions.
		 */
		if (req->src) {
			nents = sg_nents(req->src);
		} else {
			ret = -EINVAL;
			goto ahash_finup_exit;
		}

		/* Copy data from req scatterlist to tmp buffer */
		gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
		tmpbuf = kmalloc(req->nbytes, gfp);
		if (!tmpbuf) {
			ret = -ENOMEM;
			goto ahash_finup_exit;
		}

		if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
				req->nbytes) {
			ret = -EINVAL;
			goto ahash_finup_free;
		}

		/* Call synchronous update */
		ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
					 req->result);
	} else {
		/* Otherwise call the internal function which uses SPU hw */
		return __ahash_finup(req);
	}
ahash_finup_free:
	kfree(tmpbuf);
ahash_finup_exit:
	/* Done with hash, can deallocate it now */
	crypto_free_shash(ctx->shash->tfm);
	kfree(ctx->shash);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	int err;

	flow_log("ahash_digest() nbytes:%u\n", req->nbytes);

	/* whole thing at once */
	err = __ahash_init(req);
	if (!err)
		err = __ahash_finup(req);

	return err;
}
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);

	flow_log("%s() ahash:%p key:%p keylen:%u\n",
		 __func__, ahash, key, keylen);
	flow_dump("  key: ", key, keylen);

	if (ctx->auth.alg == HASH_ALG_AES) {
		switch (keylen) {
		case AES_KEYSIZE_128:
			ctx->cipher_type = CIPHER_TYPE_AES128;
			break;
		case AES_KEYSIZE_192:
			ctx->cipher_type = CIPHER_TYPE_AES192;
			break;
		case AES_KEYSIZE_256:
			ctx->cipher_type = CIPHER_TYPE_AES256;
			break;
		default:
			pr_err("%s() Error: Invalid key length\n", __func__);
			return -EINVAL;
		}
	} else {
		pr_err("%s() Error: unknown hash alg\n", __func__);
		return -EINVAL;
	}
	memcpy(ctx->authkey, key, keylen);
	ctx->authkeylen = keylen;

	return 0;
}
static int ahash_export(struct ahash_request *req, void *out)
{
	const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;

	spu_exp->total_todo = rctx->total_todo;
	spu_exp->total_sent = rctx->total_sent;
	spu_exp->is_sw_hmac = rctx->is_sw_hmac;
	memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
	spu_exp->hash_carry_len = rctx->hash_carry_len;
	memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;

	rctx->total_todo = spu_exp->total_todo;
	rctx->total_sent = spu_exp->total_sent;
	rctx->is_sw_hmac = spu_exp->is_sw_hmac;
	memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
	rctx->hash_carry_len = spu_exp->hash_carry_len;
	memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));

	return 0;
}
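
/*
 * Editor's note (illustration only): ahash_export()/ahash_import() back the
 * generic crypto_ahash_export()/crypto_ahash_import() calls, so a caller can
 * checkpoint a partial hash and resume it later, for example:
 *
 *	char state[sizeof(struct spu_hash_export_s)];
 *
 *	crypto_ahash_update(req);         // hash some data
 *	crypto_ahash_export(req, state);  // save totals, hash carry, incr hash
 *	...
 *	crypto_ahash_import(req2, state); // resume in another request
 */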
static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			     unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int index;
	int rc;

	flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
		 __func__, ahash, key, keylen, blocksize, digestsize);
	flow_dump("  key: ", key, keylen);

	if (keylen > blocksize) {
		switch (ctx->auth.alg) {
		case HASH_ALG_MD5:
			rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA1:
			rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA224:
			rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA256:
			rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA384:
			rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA512:
			rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
				      0, NULL, 0);
			break;
		case HASH_ALG_SHA3_224:
			rc = do_shash("sha3-224", ctx->authkey, key, keylen,
				      NULL, 0, NULL, 0);
			break;
		case HASH_ALG_SHA3_256:
			rc = do_shash("sha3-256", ctx->authkey, key, keylen,
				      NULL, 0, NULL, 0);
			break;
		case HASH_ALG_SHA3_384:
			rc = do_shash("sha3-384", ctx->authkey, key, keylen,
				      NULL, 0, NULL, 0);
			break;
		case HASH_ALG_SHA3_512:
			rc = do_shash("sha3-512", ctx->authkey, key, keylen,
				      NULL, 0, NULL, 0);
			break;
		default:
			pr_err("%s() Error: unknown hash alg\n", __func__);
			return -EINVAL;
		}
		if (rc < 0) {
			pr_err("%s() Error %d computing shash for %s\n",
			       __func__, rc, hash_alg_name[ctx->auth.alg]);
			return rc;
		}
		ctx->authkeylen = digestsize;

		flow_log("  keylen > digestsize... hashed\n");
		flow_dump("  newkey: ", ctx->authkey, ctx->authkeylen);
	} else {
		memcpy(ctx->authkey, key, keylen);
		ctx->authkeylen = keylen;
	}

	/*
	 * Full HMAC operation in SPUM is not verified,
	 * So keeping the generation of IPAD, OPAD and
	 * outer hashing in software.
	 */
	if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
		memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
		memset(ctx->ipad + ctx->authkeylen, 0,
		       blocksize - ctx->authkeylen);
		ctx->authkeylen = 0;
		memcpy(ctx->opad, ctx->ipad, blocksize);

		for (index = 0; index < blocksize; index++) {
			ctx->ipad[index] ^= HMAC_IPAD_VALUE;
			ctx->opad[index] ^= HMAC_OPAD_VALUE;
		}

		flow_dump("  ipad: ", ctx->ipad, blocksize);
		flow_dump("  opad: ", ctx->opad, blocksize);
	}
	ctx->digestsize = digestsize;
	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);

	return 0;
}
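
/*
 * Editor's note (illustration only): the SPU-M branch above implements the
 * standard HMAC key schedule in software:
 *
 *	ipad = (key zero-padded to blocksize) XOR 0x36 in every byte
 *	opad = (key zero-padded to blocksize) XOR 0x5c in every byte
 *
 * HMAC_IPAD_VALUE and HMAC_OPAD_VALUE come from <crypto/hmac.h>.
 */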
static int ahash_hmac_init(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	flow_log("ahash_hmac_init()\n");

	/* init the context as a hash */
	ahash_init(req);

	if (!spu_no_incr_hash(ctx)) {
		/* SPU-M can do incr hashing but needs sw for outer HMAC */
		rctx->is_sw_hmac = true;
		ctx->auth.mode = HASH_MODE_HASH;
		/* start with a prepended ipad */
		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
		rctx->hash_carry_len = blocksize;
		rctx->total_todo += blocksize;
	}

	return 0;
}
static int ahash_hmac_update(struct ahash_request *req)
{
	flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);

	if (!req->nbytes)
		return 0;

	return ahash_update(req);
}

static int ahash_hmac_final(struct ahash_request *req)
{
	flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);

	return ahash_final(req);
}

static int ahash_hmac_finup(struct ahash_request *req)
{
	flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);

	return ahash_finup(req);
}
static int ahash_hmac_digest(struct ahash_request *req)
{
	struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);

	/* Perform initialization and then call finup */
	__ahash_init(req);

	if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
		/*
		 * SPU2 supports full HMAC implementation in the
		 * hardware, need not to generate IPAD, OPAD and
		 * outer hash in software.
		 * Only for hash key len > hash block size, SPU2
		 * expects to perform hashing on the key, shorten
		 * it to digest size and feed it as hash key.
		 */
		rctx->is_sw_hmac = false;
		ctx->auth.mode = HASH_MODE_HMAC;
	} else {
		rctx->is_sw_hmac = true;
		ctx->auth.mode = HASH_MODE_HASH;
		/* start with a prepended ipad */
		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
		rctx->hash_carry_len = blocksize;
		rctx->total_todo += blocksize;
	}

	return __ahash_finup(req);
}
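
/*
 * Editor's note (illustration only): the split above mirrors the hardware
 * capabilities described earlier.  SPU2 runs the complete HMAC in hardware
 * (HASH_MODE_HMAC), while SPU-M hashes ipad || data in hardware and leaves
 * the opad / outer hash step to software (is_sw_hmac, HASH_MODE_HASH).
 */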
static int aead_need_fallback(struct aead_request *req)
{
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
	u32 payload_len;

	/*
	 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
	 * and AAD are both 0 bytes long. So use fallback in this case.
	 */
	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
	     (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
	    (req->assoclen == 0)) {
		if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
		    (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
			flow_log("AES GCM/CCM needs fallback for 0 len req\n");
			return 1;
		}
	}

	/* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
	    (spu->spu_type == SPU_TYPE_SPUM) &&
	    (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
	    (ctx->digestsize != 16)) {
		flow_log("%s() AES CCM needs fallback for digest size %d\n",
			 __func__, ctx->digestsize);
		return 1;
	}

	/*
	 * SPU-M on NSP has an issue where AES-CCM hash is not correct
	 * when AAD size is 0
	 */
	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
	    (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
	    (req->assoclen == 0)) {
		flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
			 __func__);
		return 1;
	}

	payload_len = req->cryptlen;
	if (spu->spu_type == SPU_TYPE_SPUM)
		payload_len += req->assoclen;

	flow_log("%s() payload len: %u\n", __func__, payload_len);

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		return 0;
	else
		return payload_len > ctx->max_payload;
}
static void aead_complete(struct crypto_async_request *areq, int err)
{
	struct aead_request *req =
	    container_of(areq, struct aead_request, base);
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);

	flow_log("%s() err:%d\n", __func__, err);

	areq->tfm = crypto_aead_tfm(aead);

	areq->complete = rctx->old_complete;
	areq->data = rctx->old_data;

	areq->complete(areq, err);
}
static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
	int err;
	u32 req_flags;

	flow_log("%s() enc:%u\n", __func__, is_encrypt);

	if (ctx->fallback_cipher) {
		/* Store the cipher tfm and then use the fallback tfm */
		rctx->old_tfm = tfm;
		aead_request_set_tfm(req, ctx->fallback_cipher);
		/*
		 * Save the callback and chain ourselves in, so we can restore
		 * the tfm
		 */
		rctx->old_complete = req->base.complete;
		rctx->old_data = req->base.data;
		req_flags = aead_request_flags(req);
		aead_request_set_callback(req, req_flags, aead_complete, req);
		err = is_encrypt ? crypto_aead_encrypt(req) :
		    crypto_aead_decrypt(req);

		if (err == 0) {
			/*
			 * fallback was synchronous (did not return
			 * -EINPROGRESS). So restore request state here.
			 */
			aead_request_set_callback(req, req_flags,
						  rctx->old_complete, req);
			req->base.data = rctx->old_data;
			aead_request_set_tfm(req, aead);
			flow_log("%s() fallback completed successfully\n\n",
				 __func__);
		}
	} else {
		err = -EINVAL;
	}

	return err;
}

static int aead_enqueue(struct aead_request *req, bool is_encrypt)
{
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
	int err;

	flow_log("%s() enc:%u\n", __func__, is_encrypt);

	if (req->assoclen > MAX_ASSOC_SIZE) {
		pr_err
		    ("%s() Error: associated data too long. (%u > %u bytes)\n",
		     __func__, req->assoclen, MAX_ASSOC_SIZE);
		return -EINVAL;
	}

	rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	rctx->parent = &req->base;
	rctx->is_encrypt = is_encrypt;
	rctx->bd_suppress = false;
	rctx->total_todo = req->cryptlen;
	rctx->total_sent = 0;
	rctx->total_received = 0;
	rctx->is_sw_hmac = false;

	memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));

	/* assoc data is at start of src sg */
	rctx->assoc = req->src;

	/*
	 * Init current position in src scatterlist to be after assoc data.
	 * src_skip set to buffer offset where data begins. (Assoc data could
	 * end in the middle of a buffer.)
	 */
	if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
			     &rctx->src_skip) < 0) {
		pr_err("%s() Error: Unable to find start of src data\n",
		       __func__);
		return -EINVAL;
	}

	rctx->src_nents = 0;
	rctx->dst_nents = 0;
	if (req->dst == req->src) {
		rctx->dst_sg = rctx->src_sg;
		rctx->dst_skip = rctx->src_skip;
	} else {
		/*
		 * Expect req->dst to have room for assoc data followed by
		 * output data and ICV, if encrypt. So initialize dst_sg
		 * to point beyond assoc len offset.
		 */
		if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
				     &rctx->dst_skip) < 0) {
			pr_err("%s() Error: Unable to find start of dst data\n",
			       __func__);
			return -EINVAL;
		}
	}

	if (ctx->cipher.mode == CIPHER_MODE_CBC ||
	    ctx->cipher.mode == CIPHER_MODE_CTR ||
	    ctx->cipher.mode == CIPHER_MODE_OFB ||
	    ctx->cipher.mode == CIPHER_MODE_XTS ||
	    ctx->cipher.mode == CIPHER_MODE_GCM) {
		rctx->iv_ctr_len =
			ctx->salt_len +
			crypto_aead_ivsize(crypto_aead_reqtfm(req));
	} else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
		rctx->iv_ctr_len = CCM_AES_IV_SIZE;
	} else {
		rctx->iv_ctr_len = 0;
	}

	rctx->hash_carry_len = 0;

	flow_log("  src sg: %p\n", req->src);
	flow_log("  rctx->src_sg: %p, src_skip %u\n",
		 rctx->src_sg, rctx->src_skip);
	flow_log("  assoc:  %p, assoclen %u\n", rctx->assoc, req->assoclen);
	flow_log("  dst sg: %p\n", req->dst);
	flow_log("  rctx->dst_sg: %p, dst_skip %u\n",
		 rctx->dst_sg, rctx->dst_skip);
	flow_log("  iv_ctr_len:%u\n", rctx->iv_ctr_len);
	flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
	flow_log("  authkeylen:%u\n", ctx->authkeylen);
	flow_log("  is_esp: %s\n", ctx->is_esp ? "yes" : "no");

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("  max_payload infinite");
	else
		flow_log("  max_payload: %u\n", ctx->max_payload);

	if (unlikely(aead_need_fallback(req)))
		return aead_do_fallback(req, is_encrypt);

	/*
	 * Do memory allocations for request after fallback check, because if we
	 * do fallback, we won't call finish_req() to dealloc.
	 */
	if (rctx->iv_ctr_len) {
		if (ctx->salt_len)
			memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
			       ctx->salt, ctx->salt_len);
		memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
		       req->iv,
		       rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
	}

	rctx->chan_idx = select_channel();
	err = handle_aead_req(rctx);
	if (err != -EINPROGRESS)
		/* synchronous result */
		spu_chunk_cleanup(rctx);

	return err;
}
static int aead_authenc_setkey(struct crypto_aead *cipher,
			       const u8 *key, unsigned int keylen)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
	struct rtattr *rta = (void *)key;
	struct crypto_authenc_key_param *param;
	const u8 *origkey = key;
	const unsigned int origkeylen = keylen;
	int ret = 0;

	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
		 keylen);
	flow_dump("  key: ", key, keylen);

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enckeylen = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enckeylen)
		goto badkey;
	if (ctx->enckeylen > MAX_KEY_SIZE)
		goto badkey;

	ctx->authkeylen = keylen - ctx->enckeylen;

	if (ctx->authkeylen > MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
	/* May end up padding auth key. So make sure it's zeroed. */
	memset(ctx->authkey, 0, sizeof(ctx->authkey));
	memcpy(ctx->authkey, key, ctx->authkeylen);

	switch (ctx->alg->cipher_info.alg) {
	case CIPHER_ALG_DES:
		if (ctx->enckeylen == DES_KEY_SIZE) {
			u32 tmp[DES_EXPKEY_WORDS];
			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;

			if (des_ekey(tmp, key) == 0) {
				if (crypto_aead_get_flags(cipher) &
				    CRYPTO_TFM_REQ_WEAK_KEY) {
					crypto_aead_set_flags(cipher, flags);
					return -EINVAL;
				}
			}

			ctx->cipher_type = CIPHER_TYPE_DES;
		} else {
			goto badkey;
		}
		break;
	case CIPHER_ALG_3DES:
		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
			const u32 *K = (const u32 *)key;
			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;

			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			    !((K[2] ^ K[4]) | (K[3] ^ K[5]))) {
				crypto_aead_set_flags(cipher, flags);
				return -EINVAL;
			}

			ctx->cipher_type = CIPHER_TYPE_3DES;
		} else {
			crypto_aead_set_flags(cipher,
					      CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}
		break;
	case CIPHER_ALG_AES:
		switch (ctx->enckeylen) {
		case AES_KEYSIZE_128:
			ctx->cipher_type = CIPHER_TYPE_AES128;
			break;
		case AES_KEYSIZE_192:
			ctx->cipher_type = CIPHER_TYPE_AES192;
			break;
		case AES_KEYSIZE_256:
			ctx->cipher_type = CIPHER_TYPE_AES256;
			break;
		default:
			goto badkey;
		}
		break;
	case CIPHER_ALG_RC4:
		ctx->cipher_type = CIPHER_TYPE_INIT;
		break;
	default:
		pr_err("%s() Error: Unknown cipher alg\n", __func__);
		return -EINVAL;
	}

	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
		 ctx->authkeylen);
	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);

	/* setkey the fallback just in case we need to use it */
	if (ctx->fallback_cipher) {
		flow_log("  running fallback setkey()\n");

		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
		ctx->fallback_cipher->base.crt_flags |=
		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
		ret = crypto_aead_setkey(ctx->fallback_cipher, origkey,
					 origkeylen);
		if (ret) {
			flow_log("  fallback setkey() returned:%d\n", ret);
			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
			tfm->crt_flags |=
			    (ctx->fallback_cipher->base.crt_flags &
			     CRYPTO_TFM_RES_MASK);
		}
	}

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
							  ctx->enckeylen,
							  false);

	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);

	return ret;

badkey:
	ctx->enckeylen = 0;
	ctx->authkeylen = 0;
	ctx->digestsize = 0;

	crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
			       const u8 *key, unsigned int keylen)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
	int ret = 0;

	flow_log("%s() keylen:%u\n", __func__, keylen);
	flow_dump("  key: ", key, keylen);

	ctx->digestsize = keylen;

	ctx->enckeylen = keylen;
	ctx->authkeylen = 0;
	memcpy(ctx->enckey, key, ctx->enckeylen);

	switch (ctx->enckeylen) {
	case AES_KEYSIZE_128:
		ctx->cipher_type = CIPHER_TYPE_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->cipher_type = CIPHER_TYPE_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->cipher_type = CIPHER_TYPE_AES256;
		break;
	default:
		goto badkey;
	}

	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
		 ctx->authkeylen);
	flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
	flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);

	/* setkey the fallback just in case we need to use it */
	if (ctx->fallback_cipher) {
		flow_log("  running fallback setkey()\n");

		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
		ctx->fallback_cipher->base.crt_flags |=
		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
		ret = crypto_aead_setkey(ctx->fallback_cipher, key,
					 keylen + ctx->salt_len);
		if (ret) {
			flow_log("  fallback setkey() returned:%d\n", ret);
			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
			tfm->crt_flags |=
			    (ctx->fallback_cipher->base.crt_flags &
			     CRYPTO_TFM_RES_MASK);
		}
	}

	ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
							  ctx->enckeylen,
							  false);

	atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);

	flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
		 ctx->authkeylen);

	return ret;

badkey:
	ctx->enckeylen = 0;
	ctx->authkeylen = 0;
	ctx->digestsize = 0;

	crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/**
 * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES.
 * @cipher: AEAD structure
 * @key:    Key followed by 4 bytes of salt
 * @keylen: Length of key plus salt, in bytes
 *
 * Extracts salt from key and stores it to be prepended to IV on each request.
 * Digest is always 16 bytes
 *
 * Return: Value from generic gcm setkey.
 */
static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
			       const u8 *key, unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);

	flow_log("%s\n", __func__);
	ctx->salt_len = GCM_ESP_SALT_SIZE;
	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
	keylen -= GCM_ESP_SALT_SIZE;
	ctx->digestsize = GCM_ESP_DIGESTSIZE;
	ctx->is_esp = true;
	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);

	return aead_gcm_ccm_setkey(cipher, key, keylen);
}
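
/*
 * Editor's note (illustration only): for rfc4106(gcm(aes)) the setkey blob
 * is the AES key followed by a 4-byte nonce salt, e.g. for AES-128:
 *
 *	[ 16-byte AES key ][ 4-byte salt ]   keylen = 20
 *
 * The salt is stripped off here and later prepended to the per-request IV
 * in aead_enqueue().
 */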
/**
 * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
 * @cipher: AEAD structure
 * @key:    Key followed by 4 bytes of salt
 * @keylen: Length of key plus salt, in bytes
 *
 * Extracts salt from key and stores it to be prepended to IV on each request.
 * Digest is always 16 bytes
 *
 * Return: Value from generic gcm setkey.
 */
static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
				  const u8 *key, unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);

	flow_log("%s\n", __func__);
	ctx->salt_len = GCM_ESP_SALT_SIZE;
	ctx->salt_offset = GCM_ESP_SALT_OFFSET;
	memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
	keylen -= GCM_ESP_SALT_SIZE;
	ctx->digestsize = GCM_ESP_DIGESTSIZE;
	ctx->is_esp = true;
	ctx->is_rfc4543 = true;
	flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);

	return aead_gcm_ccm_setkey(cipher, key, keylen);
}
/**
 * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES.
 * @cipher: AEAD structure
 * @key:    Key followed by 4 bytes of salt
 * @keylen: Length of key plus salt, in bytes
 *
 * Extracts salt from key and stores it to be prepended to IV on each request.
 * Digest is always 16 bytes
 *
 * Return: Value from generic ccm setkey.
 */
static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
			       const u8 *key, unsigned int keylen)
{
	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);

	flow_log("%s\n", __func__);
	ctx->salt_len = CCM_ESP_SALT_SIZE;
	ctx->salt_offset = CCM_ESP_SALT_OFFSET;
	memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
	keylen -= CCM_ESP_SALT_SIZE;
	ctx->is_esp = true;
	flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);

	return aead_gcm_ccm_setkey(cipher, key, keylen);
}
static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
{
	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
	int ret = 0;

	flow_log("%s() authkeylen:%u authsize:%u\n",
		 __func__, ctx->authkeylen, authsize);

	ctx->digestsize = authsize;

	/* setauthsize the fallback just in case we need to use it */
	if (ctx->fallback_cipher) {
		flow_log("  running fallback setauth()\n");

		ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
		if (ret)
			flow_log("  fallback setauth() returned:%d\n", ret);
	}

	return ret;
}
static int aead_encrypt(struct aead_request *req)
{
	flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
		 req->cryptlen);
	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
	flow_log("  assoc_len:%u\n", req->assoclen);

	return aead_enqueue(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
	dump_sg(req->src, 0, req->cryptlen + req->assoclen);
	flow_log("  assoc_len:%u\n", req->assoclen);

	return aead_enqueue(req, false);
}

/* ==================== Supported Cipher Algorithms ==================== */
static struct iproc_alg_s driver_algs[] = {
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "gcm(aes)",
			  .cra_driver_name = "gcm-aes-iproc",
			  .cra_blocksize = AES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK },
		.setkey = aead_gcm_ccm_setkey,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_GCM },
	  .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_GCM } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "ccm(aes)",
			  .cra_driver_name = "ccm-aes-iproc",
			  .cra_blocksize = AES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK },
		.setkey = aead_gcm_ccm_setkey,
		.ivsize = CCM_AES_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CCM },
	  .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_CCM } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "rfc4106(gcm(aes))",
			  .cra_driver_name = "gcm-aes-esp-iproc",
			  .cra_blocksize = AES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK },
		.setkey = aead_gcm_esp_setkey,
		.ivsize = GCM_RFC4106_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_GCM },
	  .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_GCM } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "rfc4309(ccm(aes))",
			  .cra_driver_name = "ccm-aes-esp-iproc",
			  .cra_blocksize = AES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK },
		.setkey = aead_ccm_esp_setkey,
		.ivsize = CCM_AES_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CCM },
	  .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_CCM } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "rfc4543(gcm(aes))",
			  .cra_driver_name = "gmac-aes-esp-iproc",
			  .cra_blocksize = AES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK },
		.setkey = rfc4543_gcm_esp_setkey,
		.ivsize = GCM_RFC4106_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_GCM },
	  .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_GCM } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(md5),cbc(aes))",
			  .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
			  .cra_blocksize = AES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_MD5, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha1),cbc(aes))",
			  .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
			  .cra_blocksize = AES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA1, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha256),cbc(aes))",
			  .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
			  .cra_blocksize = AES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA256, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(md5),cbc(des))",
			  .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
			  .cra_blocksize = DES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_MD5, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha1),cbc(des))",
			  .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
			  .cra_blocksize = DES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA1, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha224),cbc(des))",
			  .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
			  .cra_blocksize = DES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA224, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha256),cbc(des))",
			  .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
			  .cra_blocksize = DES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA256, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha384),cbc(des))",
			  .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
			  .cra_blocksize = DES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA384, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha512),cbc(des))",
			  .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
			  .cra_blocksize = DES_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA512, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			  .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
			  .cra_blocksize = DES3_EDE_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = MD5_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_MD5, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			  .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
			  .cra_blocksize = DES3_EDE_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA1_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA1, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
			  .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
			  .cra_blocksize = DES3_EDE_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA224_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA224, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			  .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
			  .cra_blocksize = DES3_EDE_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA256_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA256, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
			  .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
			  .cra_blocksize = DES3_EDE_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA384_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA384, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.aead = {
		.base = { .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
			  .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
			  .cra_blocksize = DES3_EDE_BLOCK_SIZE,
			  .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				       CRYPTO_ALG_ASYNC },
		.setkey = aead_authenc_setkey,
		.ivsize = DES3_EDE_BLOCK_SIZE,
		.maxauthsize = SHA512_DIGEST_SIZE },
	  .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_SHA512, .mode = HASH_MODE_HMAC } },
	/* ABLKCIPHER algorithms. */
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ecb(arc4)",
		.cra_driver_name = "ecb-arc4-iproc",
		.cra_blocksize = ARC4_BLOCK_SIZE,
		.cra_ablkcipher = { .min_keysize = ARC4_MIN_KEY_SIZE,
				    .max_keysize = ARC4_MAX_KEY_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_RC4, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ofb(des)",
		.cra_driver_name = "ofb-des-iproc",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ablkcipher = { .min_keysize = DES_KEY_SIZE,
				    .max_keysize = DES_KEY_SIZE,
				    .ivsize = DES_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_OFB },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "cbc-des-iproc",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ablkcipher = { .min_keysize = DES_KEY_SIZE,
				    .max_keysize = DES_KEY_SIZE,
				    .ivsize = DES_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "ecb-des-iproc",
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ablkcipher = { .min_keysize = DES_KEY_SIZE,
				    .max_keysize = DES_KEY_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_ECB },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ofb(des3_ede)",
		.cra_driver_name = "ofb-des3-iproc",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ablkcipher = { .min_keysize = DES3_EDE_KEY_SIZE,
				    .max_keysize = DES3_EDE_KEY_SIZE,
				    .ivsize = DES3_EDE_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_OFB },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "cbc-des3-iproc",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ablkcipher = { .min_keysize = DES3_EDE_KEY_SIZE,
				    .max_keysize = DES3_EDE_KEY_SIZE,
				    .ivsize = DES3_EDE_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "ecb-des3-iproc",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ablkcipher = { .min_keysize = DES3_EDE_KEY_SIZE,
				    .max_keysize = DES3_EDE_KEY_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_ECB },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ofb(aes)",
		.cra_driver_name = "ofb-aes-iproc",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE,
				    .max_keysize = AES_MAX_KEY_SIZE,
				    .ivsize = AES_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_OFB },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-iproc",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE,
				    .max_keysize = AES_MAX_KEY_SIZE,
				    .ivsize = AES_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CBC },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "ecb-aes-iproc",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ablkcipher = { .min_keysize = AES_MIN_KEY_SIZE,
				    .max_keysize = AES_MAX_KEY_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_ECB },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "ctr(aes)",
		.cra_driver_name = "ctr-aes-iproc",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ablkcipher = { /* .geniv = "chainiv", */
				    .min_keysize = AES_MIN_KEY_SIZE,
				    .max_keysize = AES_MAX_KEY_SIZE,
				    .ivsize = AES_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CTR },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "xts(aes)",
		.cra_driver_name = "xts-aes-iproc",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ablkcipher = { .min_keysize = 2 * AES_MIN_KEY_SIZE,
				    .max_keysize = 2 * AES_MAX_KEY_SIZE,
				    .ivsize = AES_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_XTS },
	  .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE } },
	/* AHASH algorithms. */
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = MD5_DIGEST_SIZE,
		.halg.base = { .cra_name = "md5",
			       .cra_driver_name = "md5-iproc",
			       .cra_blocksize = MD5_BLOCK_WORDS * 4,
			       .cra_flags = CRYPTO_ALG_TYPE_AHASH |
					    CRYPTO_ALG_ASYNC } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_MD5, .mode = HASH_MODE_HASH } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = MD5_DIGEST_SIZE,
		.halg.base = { .cra_name = "hmac(md5)",
			       .cra_driver_name = "hmac-md5-iproc",
			       .cra_blocksize = MD5_BLOCK_WORDS * 4 } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_MD5, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.base = { .cra_name = "sha1",
			       .cra_driver_name = "sha1-iproc",
			       .cra_blocksize = SHA1_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA1, .mode = HASH_MODE_HASH } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.base = { .cra_name = "hmac(sha1)",
			       .cra_driver_name = "hmac-sha1-iproc",
			       .cra_blocksize = SHA1_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA1, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA224_DIGEST_SIZE,
		.halg.base = { .cra_name = "sha224",
			       .cra_driver_name = "sha224-iproc",
			       .cra_blocksize = SHA224_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA224, .mode = HASH_MODE_HASH } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA224_DIGEST_SIZE,
		.halg.base = { .cra_name = "hmac(sha224)",
			       .cra_driver_name = "hmac-sha224-iproc",
			       .cra_blocksize = SHA224_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA224, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.base = { .cra_name = "sha256",
			       .cra_driver_name = "sha256-iproc",
			       .cra_blocksize = SHA256_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA256, .mode = HASH_MODE_HASH } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.base = { .cra_name = "hmac(sha256)",
			       .cra_driver_name = "hmac-sha256-iproc",
			       .cra_blocksize = SHA256_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA256, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.base = { .cra_name = "sha384",
			       .cra_driver_name = "sha384-iproc",
			       .cra_blocksize = SHA384_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA384, .mode = HASH_MODE_HASH } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.base = { .cra_name = "hmac(sha384)",
			       .cra_driver_name = "hmac-sha384-iproc",
			       .cra_blocksize = SHA384_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA384, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.base = { .cra_name = "sha512",
			       .cra_driver_name = "sha512-iproc",
			       .cra_blocksize = SHA512_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA512, .mode = HASH_MODE_HASH } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.base = { .cra_name = "hmac(sha512)",
			       .cra_driver_name = "hmac-sha512-iproc",
			       .cra_blocksize = SHA512_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA512, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA3_224_DIGEST_SIZE,
		.halg.base = { .cra_name = "sha3-224",
			       .cra_driver_name = "sha3-224-iproc",
			       .cra_blocksize = SHA3_224_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA3_224, .mode = HASH_MODE_HASH } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA3_224_DIGEST_SIZE,
		.halg.base = { .cra_name = "hmac(sha3-224)",
			       .cra_driver_name = "hmac-sha3-224-iproc",
			       .cra_blocksize = SHA3_224_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA3_224, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA3_256_DIGEST_SIZE,
		.halg.base = { .cra_name = "sha3-256",
			       .cra_driver_name = "sha3-256-iproc",
			       .cra_blocksize = SHA3_256_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA3_256, .mode = HASH_MODE_HASH } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA3_256_DIGEST_SIZE,
		.halg.base = { .cra_name = "hmac(sha3-256)",
			       .cra_driver_name = "hmac-sha3-256-iproc",
			       .cra_blocksize = SHA3_256_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA3_256, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA3_384_DIGEST_SIZE,
		.halg.base = { .cra_name = "sha3-384",
			       .cra_driver_name = "sha3-384-iproc",
			       .cra_blocksize = SHA3_384_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA3_384, .mode = HASH_MODE_HASH } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA3_384_DIGEST_SIZE,
		.halg.base = { .cra_name = "hmac(sha3-384)",
			       .cra_driver_name = "hmac-sha3-384-iproc",
			       .cra_blocksize = SHA3_384_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA3_384, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA3_512_DIGEST_SIZE,
		.halg.base = { .cra_name = "sha3-512",
			       .cra_driver_name = "sha3-512-iproc",
			       .cra_blocksize = SHA3_512_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA3_512, .mode = HASH_MODE_HASH } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA3_512_DIGEST_SIZE,
		.halg.base = { .cra_name = "hmac(sha3-512)",
			       .cra_driver_name = "hmac-sha3-512-iproc",
			       .cra_blocksize = SHA3_512_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_SHA3_512, .mode = HASH_MODE_HMAC } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = AES_BLOCK_SIZE,
		.halg.base = { .cra_name = "xcbc(aes)",
			       .cra_driver_name = "xcbc-aes-iproc",
			       .cra_blocksize = AES_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_XCBC } },
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = AES_BLOCK_SIZE,
		.halg.base = { .cra_name = "cmac(aes)",
			       .cra_driver_name = "cmac-aes-iproc",
			       .cra_blocksize = AES_BLOCK_SIZE } },
	  .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE },
	  .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_CMAC } },
};
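
/*
 * Editor's note (illustration only): generic_cra_init() below copies the
 * cipher_info/auth_info pair of the matching driver_algs[] entry into the
 * session context; that is how the request handlers above know which SPU
 * cipher and hash mode to build into each SPU request message.
 */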
static int generic_cra_init(struct crypto_tfm *tfm,
			    struct iproc_alg_s *cipher_alg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);

	flow_log("%s()\n", __func__);

	ctx->alg = cipher_alg;
	ctx->cipher = cipher_alg->cipher_info;
	ctx->auth = cipher_alg->auth_info;
	ctx->auth_first = cipher_alg->auth_first;
	ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
						    ctx->cipher.mode,
						    blocksize);
	ctx->fallback_cipher = NULL;

	ctx->enckeylen = 0;
	ctx->authkeylen = 0;

	atomic_inc(&iproc_priv.stream_count);
	atomic_inc(&iproc_priv.session_count);

	return 0;
}
*tfm
)
4354 struct crypto_alg
*alg
= tfm
->__crt_alg
;
4355 struct iproc_alg_s
*cipher_alg
;
4357 flow_log("%s()\n", __func__
);
4359 tfm
->crt_ablkcipher
.reqsize
= sizeof(struct iproc_reqctx_s
);
4361 cipher_alg
= container_of(alg
, struct iproc_alg_s
, alg
.crypto
);
4362 return generic_cra_init(tfm
, cipher_alg
);
4365 static int ahash_cra_init(struct crypto_tfm
*tfm
)
4368 struct crypto_alg
*alg
= tfm
->__crt_alg
;
4369 struct iproc_alg_s
*cipher_alg
;
4371 cipher_alg
= container_of(__crypto_ahash_alg(alg
), struct iproc_alg_s
,
4374 err
= generic_cra_init(tfm
, cipher_alg
);
4375 flow_log("%s()\n", __func__
);
4378 * export state size has to be < 512 bytes. So don't include msg bufs
4381 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
4382 sizeof(struct iproc_reqctx_s
));
static int aead_cra_init(struct crypto_aead *aead)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
	struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
						      alg.aead);

	int err = generic_cra_init(tfm, cipher_alg);

	flow_log("%s()\n", __func__);

	crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
	ctx->is_esp = false;
	ctx->salt_offset = 0;

	/* random first IV */
	get_random_bytes(ctx->iv, MAX_IV_SIZE);
	flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);

	if (!err) {
		if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
			flow_log("%s() creating fallback cipher\n", __func__);

			ctx->fallback_cipher =
			    crypto_alloc_aead(alg->cra_name, 0,
					      CRYPTO_ALG_ASYNC |
					      CRYPTO_ALG_NEED_FALLBACK);
			if (IS_ERR(ctx->fallback_cipher)) {
				pr_err("%s() Error: failed to allocate fallback for %s\n",
				       __func__, alg->cra_name);
				return PTR_ERR(ctx->fallback_cipher);
			}
		}
	}

	return err;
}
static void generic_cra_exit(struct crypto_tfm *tfm)
{
	atomic_dec(&iproc_priv.session_count);
}

static void aead_cra_exit(struct crypto_aead *aead)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);

	generic_cra_exit(tfm);

	if (ctx->fallback_cipher) {
		crypto_free_aead(ctx->fallback_cipher);
		ctx->fallback_cipher = NULL;
	}
}
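/*
 * Illustrative call flow (assumed, based on the standard kernel crypto API):
 * the core invokes the init/exit hooks wired up in the registration helpers
 * below whenever a transform is allocated or freed, e.g. for an AEAD:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *		-> aead_cra_init()	(via aead->init)
 *	crypto_free_aead(tfm);
 *		-> aead_cra_exit()	(via aead->exit)
 *
 * "gcm(aes)" is only an example template name; whether this driver serves
 * the request depends on the driver_algs[] table and algorithm priorities.
 */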
/**
 * spu_functions_register() - Specify hardware-specific SPU functions based on
 * SPU type read from device tree.
 * @dev:	device structure
 * @spu_type:	SPU hardware generation
 * @spu_subtype: SPU hardware version
 */
static void spu_functions_register(struct device *dev,
				   enum spu_spu_type spu_type,
				   enum spu_spu_subtype spu_subtype)
{
	struct spu_hw *spu = &iproc_priv.spu;

	if (spu_type == SPU_TYPE_SPUM) {
		dev_dbg(dev, "Registering SPUM functions");
		spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
		spu->spu_payload_length = spum_payload_length;
		spu->spu_response_hdr_len = spum_response_hdr_len;
		spu->spu_hash_pad_len = spum_hash_pad_len;
		spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
		spu->spu_assoc_resp_len = spum_assoc_resp_len;
		spu->spu_aead_ivlen = spum_aead_ivlen;
		spu->spu_hash_type = spum_hash_type;
		spu->spu_digest_size = spum_digest_size;
		spu->spu_create_request = spum_create_request;
		spu->spu_cipher_req_init = spum_cipher_req_init;
		spu->spu_cipher_req_finish = spum_cipher_req_finish;
		spu->spu_request_pad = spum_request_pad;
		spu->spu_tx_status_len = spum_tx_status_len;
		spu->spu_rx_status_len = spum_rx_status_len;
		spu->spu_status_process = spum_status_process;
		spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
		spu->spu_ccm_update_iv = spum_ccm_update_iv;
		spu->spu_wordalign_padlen = spum_wordalign_padlen;

		if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
			spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
		else
			spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
	} else {
		dev_dbg(dev, "Registering SPU2 functions");
		spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
		spu->spu_ctx_max_payload = spu2_ctx_max_payload;
		spu->spu_payload_length = spu2_payload_length;
		spu->spu_response_hdr_len = spu2_response_hdr_len;
		spu->spu_hash_pad_len = spu2_hash_pad_len;
		spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
		spu->spu_assoc_resp_len = spu2_assoc_resp_len;
		spu->spu_aead_ivlen = spu2_aead_ivlen;
		spu->spu_hash_type = spu2_hash_type;
		spu->spu_digest_size = spu2_digest_size;
		spu->spu_create_request = spu2_create_request;
		spu->spu_cipher_req_init = spu2_cipher_req_init;
		spu->spu_cipher_req_finish = spu2_cipher_req_finish;
		spu->spu_request_pad = spu2_request_pad;
		spu->spu_tx_status_len = spu2_tx_status_len;
		spu->spu_rx_status_len = spu2_rx_status_len;
		spu->spu_status_process = spu2_status_process;
		spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
		spu->spu_ccm_update_iv = spu2_ccm_update_iv;
		spu->spu_wordalign_padlen = spu2_wordalign_padlen;
	}
}
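/*
 * Illustrative use of the ops table above (a sketch, not an actual call
 * site): the rest of the driver calls through iproc_priv.spu, so it never
 * needs to know whether SPU-M or SPU2 hardware is present, e.g.:
 *
 *	struct spu_hw *spu = &iproc_priv.spu;
 *	unsigned int max = spu->spu_ctx_max_payload(ctx->cipher.alg,
 *						    ctx->cipher.mode,
 *						    blocksize);
 *
 * generic_cra_init() above makes exactly this call; the same code path works
 * for every SPU generation once spu_functions_register() has run.
 */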
/**
 * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox
 * channel for the SPU being probed.
 * @dev:  SPU driver device structure
 *
 * Return: 0 if successful
 *	   < 0 otherwise
 */
static int spu_mb_init(struct device *dev)
{
	struct mbox_client *mcl = &iproc_priv.mcl;
	int err = 0;
	int i;

	iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
				       sizeof(struct mbox_chan *), GFP_KERNEL);
	if (!iproc_priv.mbox)
		return -ENOMEM;

	mcl->dev = dev;
	mcl->tx_block = false;
	mcl->tx_tout = 0;
	mcl->knows_txdone = true;
	mcl->rx_callback = spu_rx_callback;
	mcl->tx_done = NULL;

	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
		iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
		if (IS_ERR(iproc_priv.mbox[i])) {
			err = (int)PTR_ERR(iproc_priv.mbox[i]);
			dev_err(dev,
				"Mbox channel %d request failed with err %d",
				i, err);
			iproc_priv.mbox[i] = NULL;
			goto free_channels;
		}
	}

	return 0;

free_channels:
	for (i = 0; i < iproc_priv.spu.num_chan; i++) {
		if (iproc_priv.mbox[i])
			mbox_free_channel(iproc_priv.mbox[i]);
	}

	return err;
}
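/*
 * Illustrative sketch of how the channels requested above are used when
 * submitting work (assumed; the real submission path lives elsewhere in this
 * driver):
 *
 *	struct brcm_message msg;	// hypothetical, filled with SPU descriptors
 *	int rc = mbox_send_message(iproc_priv.mbox[chan], &msg);
 *
 *	if (rc >= 0)
 *		mbox_client_txdone(iproc_priv.mbox[chan], 0);
 *
 * mbox_send_message()/mbox_client_txdone() are standard mailbox framework
 * calls; setting mcl->knows_txdone = true above is what makes the explicit
 * mbox_client_txdone() call the client's responsibility.
 */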
static void spu_mb_release(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < iproc_priv.spu.num_chan; i++)
		mbox_free_channel(iproc_priv.mbox[i]);
}

static void spu_counters_init(void)
{
	int i;
	int j;

	atomic_set(&iproc_priv.session_count, 0);
	atomic_set(&iproc_priv.stream_count, 0);
	atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
	atomic64_set(&iproc_priv.bytes_in, 0);
	atomic64_set(&iproc_priv.bytes_out, 0);
	for (i = 0; i < SPU_OP_NUM; i++) {
		atomic_set(&iproc_priv.op_counts[i], 0);
		atomic_set(&iproc_priv.setkey_cnt[i], 0);
	}
	for (i = 0; i < CIPHER_ALG_LAST; i++)
		for (j = 0; j < CIPHER_MODE_LAST; j++)
			atomic_set(&iproc_priv.cipher_cnt[i][j], 0);

	for (i = 0; i < HASH_ALG_LAST; i++) {
		atomic_set(&iproc_priv.hash_cnt[i], 0);
		atomic_set(&iproc_priv.hmac_cnt[i], 0);
	}
	for (i = 0; i < AEAD_TYPE_LAST; i++)
		atomic_set(&iproc_priv.aead_cnt[i], 0);

	atomic_set(&iproc_priv.mb_no_spc, 0);
	atomic_set(&iproc_priv.mb_send_fail, 0);
	atomic_set(&iproc_priv.bad_icv, 0);
}
static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_alg *crypto = &driver_alg->alg.crypto;
	int err;

	/* SPU2 does not support RC4 */
	if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
	    (spu->spu_type == SPU_TYPE_SPU2))
		return 0;

	crypto->cra_module = THIS_MODULE;
	crypto->cra_priority = cipher_pri;
	crypto->cra_alignmask = 0;
	crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
	INIT_LIST_HEAD(&crypto->cra_list);

	crypto->cra_init = ablkcipher_cra_init;
	crypto->cra_exit = generic_cra_exit;
	crypto->cra_type = &crypto_ablkcipher_type;
	crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY;

	crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
	crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
	crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;

	err = crypto_register_alg(crypto);
	/* Mark alg as having been registered, if successful */
	if (err == 0)
		driver_alg->registered = true;
	pr_debug("  registered ablkcipher %s\n", crypto->cra_driver_name);
	return err;
}
static int spu_register_ahash(struct iproc_alg_s *driver_alg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct ahash_alg *hash = &driver_alg->alg.hash;
	int err;

	/* AES-XCBC is the only AES hash type currently supported on SPU-M */
	if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
	    (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
	    (spu->spu_type == SPU_TYPE_SPUM))
		return 0;

	/* SHA3 algorithm variants are not registered for SPU-M or SPU2. */
	if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
	    (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
		return 0;

	hash->halg.base.cra_module = THIS_MODULE;
	hash->halg.base.cra_priority = hash_pri;
	hash->halg.base.cra_alignmask = 0;
	hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
	hash->halg.base.cra_init = ahash_cra_init;
	hash->halg.base.cra_exit = generic_cra_exit;
	hash->halg.base.cra_type = &crypto_ahash_type;
	hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
	hash->halg.statesize = sizeof(struct spu_hash_export_s);

	if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
		hash->setkey = ahash_setkey;
		hash->init = ahash_init;
		hash->update = ahash_update;
		hash->final = ahash_final;
		hash->finup = ahash_finup;
		hash->digest = ahash_digest;
	} else {
		hash->setkey = ahash_hmac_setkey;
		hash->init = ahash_hmac_init;
		hash->update = ahash_hmac_update;
		hash->final = ahash_hmac_final;
		hash->finup = ahash_hmac_finup;
		hash->digest = ahash_hmac_digest;
	}
	hash->export = ahash_export;
	hash->import = ahash_import;

	err = crypto_register_ahash(hash);
	/* Mark alg as having been registered, if successful */
	if (err == 0)
		driver_alg->registered = true;
	pr_debug("  registered ahash %s\n",
		 hash->halg.base.cra_driver_name);
	return err;
}
static int spu_register_aead(struct iproc_alg_s *driver_alg)
{
	struct aead_alg *aead = &driver_alg->alg.aead;
	int err;

	aead->base.cra_module = THIS_MODULE;
	aead->base.cra_priority = aead_pri;
	aead->base.cra_alignmask = 0;
	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
	INIT_LIST_HEAD(&aead->base.cra_list);

	aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	/* setkey set in alg initialization */
	aead->setauthsize = aead_setauthsize;
	aead->encrypt = aead_encrypt;
	aead->decrypt = aead_decrypt;
	aead->init = aead_cra_init;
	aead->exit = aead_cra_exit;

	err = crypto_register_aead(aead);
	/* Mark alg as having been registered, if successful */
	if (err == 0)
		driver_alg->registered = true;
	pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
	return err;
}
/* register crypto algorithms the device supports */
static int spu_algs_register(struct device *dev)
{
	int i, j;
	int err;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			err = spu_register_ablkcipher(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = spu_register_ahash(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = spu_register_aead(&driver_algs[i]);
			break;
		default:
			dev_err(dev,
				"iproc-crypto: unknown alg type: %d",
				driver_algs[i].type);
			err = -EINVAL;
		}

		if (err) {
			dev_err(dev, "alg registration failed with error %d\n",
				err);
			goto err_algs;
		}
	}

	return 0;

err_algs:
	for (j = 0; j < i; j++) {
		/* Skip any algorithm not registered */
		if (!driver_algs[j].registered)
			continue;
		switch (driver_algs[j].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&driver_algs[j].alg.crypto);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[j].alg.hash);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[j].alg.aead);
			driver_algs[j].registered = false;
			break;
		}
	}
	return err;
}
/* ==================== Kernel Platform API ==================== */

static struct spu_type_subtype spum_ns2_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
};

static struct spu_type_subtype spum_nsp_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
};

static struct spu_type_subtype spu2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
};

static struct spu_type_subtype spu2_v2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
};

static const struct of_device_id bcm_spu_dt_ids[] = {
	{
		.compatible = "brcm,spum-crypto",
		.data = &spum_ns2_types,
	},
	{
		.compatible = "brcm,spum-nsp-crypto",
		.data = &spum_nsp_types,
	},
	{
		.compatible = "brcm,spu2-crypto",
		.data = &spu2_types,
	},
	{
		.compatible = "brcm,spu2-v2-crypto",
		.data = &spu2_v2_types,
	},
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
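/*
 * Illustrative device tree node (an assumption for documentation purposes;
 * the addresses and the &pdc0 mailbox label are hypothetical, and the
 * binding document is authoritative):
 *
 *	crypto@612d0000 {
 *		compatible = "brcm,spu2-crypto";
 *		reg = <0x612d0000 0x1000>;
 *		mboxes = <&pdc0 0>;
 *	};
 *
 * The compatible string selects one of the spu_type_subtype entries above
 * via of_device_get_match_data(); the "mboxes" phandles determine num_chan
 * in spu_dt_read() below.
 */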
static int spu_dt_read(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	struct resource *spu_ctrl_regs;
	const struct spu_type_subtype *matched_spu_type;
	struct device_node *dn = pdev->dev.of_node;
	int err, i;

	/* Count number of mailbox channels */
	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");

	matched_spu_type = of_device_get_match_data(dev);
	if (!matched_spu_type) {
		dev_err(&pdev->dev, "Failed to match device\n");
		return -ENODEV;
	}

	spu->spu_type = matched_spu_type->type;
	spu->spu_subtype = matched_spu_type->subtype;

	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {

		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
		if (IS_ERR(spu->reg_vbase[i])) {
			err = PTR_ERR(spu->reg_vbase[i]);
			dev_err(&pdev->dev, "Failed to map registers: %d\n",
				err);
			spu->reg_vbase[i] = NULL;
			return err;
		}
	}
	spu->num_spu = i;
	dev_dbg(dev, "Device has %d SPUs", spu->num_spu);

	return 0;
}
int bcm_spu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	int err;

	iproc_priv.pdev = pdev;
	platform_set_drvdata(iproc_priv.pdev,
			     &iproc_priv);

	err = spu_dt_read(pdev);
	if (err < 0)
		goto failure;

	err = spu_mb_init(&pdev->dev);
	if (err < 0)
		goto failure;

	if (spu->spu_type == SPU_TYPE_SPUM)
		iproc_priv.bcm_hdr_len = 8;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		iproc_priv.bcm_hdr_len = 0;

	spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);

	spu_counters_init();

	spu_setup_debugfs();

	err = spu_algs_register(dev);
	if (err < 0)
		goto fail_reg;

	return 0;

fail_reg:
	spu_free_debugfs();
failure:
	spu_mb_release(pdev);
	dev_err(dev, "%s failed with error %d.\n", __func__, err);

	return err;
}
int bcm_spu_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	char *cdn;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/*
		 * Not all algorithms were registered, depending on whether
		 * hardware is SPU or SPU2. So here we make sure to skip
		 * those algorithms that were not previously registered.
		 */
		if (!driver_algs[i].registered)
			continue;

		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&driver_algs[i].alg.crypto);
			dev_dbg(dev, "  unregistered cipher %s\n",
				driver_algs[i].alg.crypto.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[i].alg.hash);
			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
			dev_dbg(dev, "  unregistered hash %s\n", cdn);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[i].alg.aead);
			dev_dbg(dev, "  unregistered aead %s\n",
				driver_algs[i].alg.aead.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		}
	}
	spu_free_debugfs();
	spu_mb_release(pdev);
	return 0;
}
/* ===== Kernel Module API ===== */

static struct platform_driver bcm_spu_pdriver = {
	.driver = {
		.name = "brcm-spu-crypto",
		.of_match_table = of_match_ptr(bcm_spu_dt_ids),
	},
	.probe = bcm_spu_probe,
	.remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
MODULE_LICENSE("GPL v2");