/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 * Copyright (c) 2018, Covalent IO, Inc. http://covalent.io
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/bug.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/splice.h>
#include <crypto/aead.h>

#include <net/strparser.h>

#include <trace/events/sock.h>
struct tls_decrypt_arg {
	struct_group(inargs,
	bool zc;
	bool async;
	bool async_done;
	u8 tail;
	);
	struct sk_buff *skb;
};

struct tls_decrypt_ctx {
	struct sock *sk;
	u8 iv[TLS_MAX_IV_SIZE];
	u8 aad[TLS_MAX_AAD_SIZE];
	u8 tail;
	bool free_sgout;
	struct scatterlist sg[];
};
noinline void tls_err_abort(struct sock *sk, int err)
{
	WARN_ON_ONCE(err >= 0);
	/* sk->sk_err should contain a positive error code. */
	WRITE_ONCE(sk->sk_err, -err);
	/* Paired with smp_rmb() in tcp_poll() */
	smp_wmb();
	sk_error_report(sk);
}
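/* Count how many scatterlist entries are needed to map [offset, offset + len)
 * of @skb, walking the linear area, page frags and the frag list. Recursion
 * into nested frag lists is capped so a malicious chain cannot blow the stack.
 */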
static int __skb_nsg(struct sk_buff *skb, int offset, int len,
		     unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, chunk = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (chunk > 0) {
		if (chunk > len)
			chunk = len;
		elt++;
		len -= chunk;
		if (len == 0)
			return elt;
		offset += chunk;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		chunk = end - offset;
		if (chunk > 0) {
			if (chunk > len)
				chunk = len;
			elt++;
			len -= chunk;
			if (len == 0)
				return elt;
			offset += chunk;
		}
		start = end;
	}

	if (unlikely(skb_has_frag_list(skb))) {
		skb_walk_frags(skb, frag_iter) {
			int end, ret;

			WARN_ON(start > offset + len);

			end = start + frag_iter->len;
			chunk = end - offset;
			if (chunk > 0) {
				if (chunk > len)
					chunk = len;
				ret = __skb_nsg(frag_iter, offset - start, chunk,
						recursion_level + 1);
				if (unlikely(ret < 0))
					return ret;
				elt += ret;
				len -= chunk;
				if (len == 0)
					return elt;
				offset += chunk;
			}
			start = end;
		}
	}

	return elt;
}
/* Return the number of scatterlist elements required to completely map the
 * skb, or -EMSGSIZE if the recursion depth is exceeded.
 */
static int skb_nsg(struct sk_buff *skb, int offset, int len)
{
	return __skb_nsg(skb, offset, len, 0);
}
static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
			      struct tls_decrypt_arg *darg)
{
	struct strp_msg *rxm = strp_msg(skb);
	struct tls_msg *tlm = tls_msg(skb);
	int len = 0;

	/* Determine zero-padding length */
	if (prot->version == TLS_1_3_VERSION) {
		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
		char content_type = darg->zc ? darg->tail : 0;
		int err;

		while (content_type == 0) {
			if (offset < prot->prepend_size)
				return -EBADMSG;
			err = skb_copy_bits(skb, rxm->offset + offset,
					    &content_type, 1);
			if (err)
				return err;
			if (content_type)
				break;
			len++;
			offset--;
		}

		tlm->control = content_type;
	}

	return len;
}
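/* Completion callback for asynchronous AEAD decryption. Runs in the crypto
 * layer's context: it propagates errors to the socket, releases any zero-copy
 * destination pages and wakes up a waiter once the last pending decrypt
 * finishes.
 */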
static void tls_decrypt_done(void *data, int err)
{
	struct aead_request *aead_req = data;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct scatterlist *sgout = aead_req->dst;
	struct tls_sw_context_rx *ctx;
	struct tls_decrypt_ctx *dctx;
	struct tls_context *tls_ctx;
	struct scatterlist *sg;
	unsigned int pages;
	struct sock *sk;
	int aead_size;

	/* If requests get too backlogged crypto API returns -EBUSY and calls
	 * ->complete(-EINPROGRESS) immediately followed by ->complete(0)
	 * to make waiting for backlog to flush with crypto_wait_req() easier.
	 * First wait converts -EBUSY -> -EINPROGRESS, and the second one
	 * -EINPROGRESS -> 0.
	 * We have a single struct crypto_async_request per direction, this
	 * scheme doesn't help us, so just ignore the first ->complete().
	 */
	if (err == -EINPROGRESS)
		return;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(aead);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	dctx = (void *)((u8 *)aead_req + aead_size);

	sk = dctx->sk;
	tls_ctx = tls_get_ctx(sk);
	ctx = tls_sw_ctx_rx(tls_ctx);

	/* Propagate if there was an err */
	if (err) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		ctx->async_wait.err = err;
		tls_err_abort(sk, err);
	}

	/* Free the destination pages if skb was not decrypted inplace */
	if (dctx->free_sgout) {
		/* Skip the first S/G entry as it points to AAD */
		for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
			if (!sg)
				break;
			put_page(sg_page(sg));
		}
	}

	if (atomic_dec_and_test(&ctx->decrypt_pending))
		complete(&ctx->async_wait.completion);
}
static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
{
	if (!atomic_dec_and_test(&ctx->decrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->decrypt_pending);

	return ctx->async_wait.err;
}
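/* Submit one AEAD decrypt request. In the async-capable case the request
 * completes via tls_decrypt_done(); otherwise the caller synchronously waits
 * on a local crypto_wait for the result.
 */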
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + prot->tag_size,
			       (u8 *)iv_recv);

	if (darg->async) {
		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  tls_decrypt_done, aead_req);
		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
		atomic_inc(&ctx->decrypt_pending);
	} else {
		DECLARE_CRYPTO_WAIT(wait);

		aead_request_set_callback(aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &wait);
		ret = crypto_aead_decrypt(aead_req);
		if (ret == -EINPROGRESS || ret == -EBUSY)
			ret = crypto_wait_req(ret, &wait);
		return ret;
	}

	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS)
		return 0;

	if (ret == -EBUSY) {
		ret = tls_decrypt_async_wait(ctx);
		darg->async_done = true;
		/* all completions have run, we're not doing async anymore */
		darg->async = false;
		return ret;
	}

	atomic_dec(&ctx->decrypt_pending);
	darg->async = false;

	return ret;
}
static void tls_trim_both_msgs(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
	if (target_size > 0)
		target_size += prot->overhead_size;
	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
}
static int tls_alloc_encrypted_msg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;

	return sk_msg_alloc(sk, msg_en, len, 0);
}
static int tls_clone_plaintext_msg(struct sock *sk, int required)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl = &rec->msg_plaintext;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	int skip, len;

	/* We add page references worth len bytes from encrypted sg
	 * at the end of plaintext sg. It is guaranteed that msg_en
	 * has enough required room (ensured by caller).
	 */
	len = required - msg_pl->sg.size;

	/* Skip initial bytes in msg_en's data to be able to use
	 * same offset of both plain and encrypted data.
	 */
	skip = prot->prepend_size + msg_pl->sg.size;

	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
}
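/* Allocate a fresh TLS record, sized to also hold the AEAD request, and
 * pre-populate the AAD slots of its input and output scatterlists.
 */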
static struct tls_rec *tls_get_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int mem_size;

	mem_size = sizeof(struct tls_rec) + crypto_aead_reqsize(ctx->aead_send);

	rec = kzalloc(mem_size, sk->sk_allocation);
	if (!rec)
		return NULL;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	sk_msg_init(msg_pl);
	sk_msg_init(msg_en);

	sg_init_table(rec->sg_aead_in, 2);
	sg_set_buf(&rec->sg_aead_in[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_in[1]);

	sg_init_table(rec->sg_aead_out, 2);
	sg_set_buf(&rec->sg_aead_out[0], rec->aad_space, prot->aad_size);
	sg_unmark_end(&rec->sg_aead_out[1]);

	rec->sk = sk;

	return rec;
}
static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
{
	sk_msg_free(sk, &rec->msg_encrypted);
	sk_msg_free(sk, &rec->msg_plaintext);
	kfree(rec);
}
static void tls_free_open_rec(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;

	if (rec) {
		tls_free_rec(sk, rec);
		ctx->open_rec = NULL;
	}
}
int tls_tx_records(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	struct sk_msg *msg_en;
	int tx_flags, rc = 0;

	if (tls_is_partially_sent_record(tls_ctx)) {
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);

		if (flags == -1)
			tx_flags = rec->tx_flags;
		else
			tx_flags = flags;

		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
		if (rc)
			goto tx_err;

		/* Full record has been transmitted.
		 * Remove the head of tx_list
		 */
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	/* Tx all ready records */
	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		if (READ_ONCE(rec->tx_ready)) {
			if (flags == -1)
				tx_flags = rec->tx_flags;
			else
				tx_flags = flags;

			msg_en = &rec->msg_encrypted;
			rc = tls_push_sg(sk, tls_ctx,
					 &msg_en->sg.data[msg_en->sg.curr],
					 0, tx_flags);
			if (rc)
				goto tx_err;

			list_del(&rec->list);
			sk_msg_free(sk, &rec->msg_plaintext);
			kfree(rec);
		} else {
			break;
		}
	}

tx_err:
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, -EBADMSG);

	return rc;
}
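/* Completion callback for asynchronous AEAD encryption: restore the header
 * space stolen from the first fragment, mark the record ready and kick the
 * transmit worker if this record sits at the head of tx_list.
 */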
static void tls_encrypt_done(void *data, int err)
{
	struct tls_sw_context_tx *ctx;
	struct tls_context *tls_ctx;
	struct tls_prot_info *prot;
	struct tls_rec *rec = data;
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct sock *sk;

	if (err == -EINPROGRESS) /* see the comment in tls_decrypt_done() */
		return;

	msg_en = &rec->msg_encrypted;

	sk = rec->sk;
	tls_ctx = tls_get_ctx(sk);
	prot = &tls_ctx->prot_info;
	ctx = tls_sw_ctx_tx(tls_ctx);

	sge = sk_msg_elem(msg_en, msg_en->sg.curr);
	sge->offset -= prot->prepend_size;
	sge->length += prot->prepend_size;

	/* Check if error is previously set on socket */
	if (err || sk->sk_err) {
		rec = NULL;

		/* If err is already set on socket, return the same code */
		if (sk->sk_err) {
			ctx->async_wait.err = -sk->sk_err;
		} else {
			ctx->async_wait.err = err;
			tls_err_abort(sk, err);
		}
	}

	if (rec) {
		struct tls_rec *first_rec;

		/* Mark the record as ready for transmission */
		smp_store_mb(rec->tx_ready, true);

		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec) {
			/* Schedule the transmission */
			if (!test_and_set_bit(BIT_TX_SCHEDULED,
					      &ctx->tx_bitmask))
				schedule_delayed_work(&ctx->tx_work.work, 1);
		}
	}

	if (atomic_dec_and_test(&ctx->encrypt_pending))
		complete(&ctx->async_wait.completion);
}
static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
{
	if (!atomic_dec_and_test(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->encrypt_pending);

	return ctx->async_wait.err;
}
static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len, u32 start)
{
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_en = &rec->msg_encrypted;
	struct scatterlist *sge = sk_msg_elem(msg_en, start);
	int rc, iv_offset = 0;

	/* For CCM based ciphers, first byte of IV is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		rec->iv_data[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		rec->iv_data[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
	       prot->iv_size + prot->salt_size);

	tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
			    tls_ctx->tx.rec_seq);

	sge->offset += prot->prepend_size;
	sge->length -= prot->prepend_size;

	msg_en->sg.curr = start;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, prot->aad_size);
	aead_request_set_crypt(aead_req, rec->sg_aead_in,
			       rec->sg_aead_out,
			       data_len, rec->iv_data);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tls_encrypt_done, rec);

	/* Add the record in tx_list */
	list_add_tail((struct list_head *)&rec->list, &ctx->tx_list);
	DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1);
	atomic_inc(&ctx->encrypt_pending);

	rc = crypto_aead_encrypt(aead_req);
	if (rc == -EBUSY) {
		rc = tls_encrypt_async_wait(ctx);
		rc = rc ?: -EINPROGRESS;
	}
	if (!rc || rc != -EINPROGRESS) {
		atomic_dec(&ctx->encrypt_pending);
		sge->offset -= prot->prepend_size;
		sge->length += prot->prepend_size;
	}

	if (!rc) {
		WRITE_ONCE(rec->tx_ready, true);
	} else if (rc != -EINPROGRESS) {
		list_del(&rec->list);
		return rc;
	}

	/* Unhook the record from context if encryption is not failure */
	ctx->open_rec = NULL;
	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
	return rc;
}
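/* Split the currently open record at @split_point, moving the remainder of
 * the plaintext scatterlist into a newly allocated record so the first part
 * can be pushed on its own.
 */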
static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
				 struct tls_rec **to, struct sk_msg *msg_opl,
				 struct sk_msg *msg_oen, u32 split_point,
				 u32 tx_overhead_size, u32 *orig_end)
{
	u32 i, j, bytes = 0, apply = msg_opl->apply_bytes;
	struct scatterlist *sge, *osge, *nsge;
	u32 orig_size = msg_opl->sg.size;
	struct scatterlist tmp = { };
	struct sk_msg *msg_npl;
	struct tls_rec *new;
	int ret;

	new = tls_get_rec(sk);
	if (!new)
		return -ENOMEM;
	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
			   tx_overhead_size, 0);
	if (ret < 0) {
		tls_free_rec(sk, new);
		return ret;
	}

	*orig_end = msg_opl->sg.end;
	i = msg_opl->sg.start;
	sge = sk_msg_elem(msg_opl, i);
	while (apply && sge->length) {
		if (sge->length > apply) {
			u32 len = sge->length - apply;

			get_page(sg_page(sge));
			sg_set_page(&tmp, sg_page(sge), len,
				    sge->offset + apply);
			sge->length = apply;
			bytes += apply;
			apply = 0;
		} else {
			apply -= sge->length;
			bytes += sge->length;
		}

		sk_msg_iter_var_next(i);
		if (i == msg_opl->sg.end)
			break;
		sge = sk_msg_elem(msg_opl, i);
	}

	msg_opl->sg.end = i;
	msg_opl->sg.curr = i;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = 0;
	msg_opl->sg.size = bytes;

	msg_npl = &new->msg_plaintext;
	msg_npl->apply_bytes = apply;
	msg_npl->sg.size = orig_size - bytes;

	j = msg_npl->sg.start;
	nsge = sk_msg_elem(msg_npl, j);
	if (tmp.length) {
		memcpy(nsge, &tmp, sizeof(*nsge));
		sk_msg_iter_var_next(j);
		nsge = sk_msg_elem(msg_npl, j);
	}

	osge = sk_msg_elem(msg_opl, i);
	while (osge->length) {
		memcpy(nsge, osge, sizeof(*nsge));
		sg_unmark_end(nsge);
		sk_msg_iter_var_next(i);
		sk_msg_iter_var_next(j);
		if (i == *orig_end)
			break;
		osge = sk_msg_elem(msg_opl, i);
		nsge = sk_msg_elem(msg_npl, j);
	}

	msg_npl->sg.end = j;
	msg_npl->sg.curr = j;
	msg_npl->sg.copybreak = 0;

	*to = new;
	return 0;
}
static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
				  struct tls_rec *from, u32 orig_end)
{
	struct sk_msg *msg_npl = &from->msg_plaintext;
	struct sk_msg *msg_opl = &to->msg_plaintext;
	struct scatterlist *osge, *nsge;
	u32 i, j;

	i = msg_opl->sg.end;
	sk_msg_iter_var_prev(i);
	j = msg_npl->sg.start;

	osge = sk_msg_elem(msg_opl, i);
	nsge = sk_msg_elem(msg_npl, j);

	if (sg_page(osge) == sg_page(nsge) &&
	    osge->offset + osge->length == nsge->offset) {
		osge->length += nsge->length;
		put_page(sg_page(nsge));
	}

	msg_opl->sg.end = orig_end;
	msg_opl->sg.curr = orig_end;
	msg_opl->sg.copybreak = 0;
	msg_opl->apply_bytes = msg_opl->sg.size + msg_npl->sg.size;
	msg_opl->sg.size += msg_npl->sg.size;

	sk_msg_free(sk, &to->msg_encrypted);
	sk_msg_xfer_full(&to->msg_encrypted, &from->msg_encrypted);

	kfree(from);
}
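/* Close the currently open record: chain AAD, payload and (for TLS 1.3) the
 * content-type byte into the AEAD scatterlists, write the TLS header and
 * hand the record to tls_do_encryption().
 */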
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec, *tmp = NULL;
	u32 i, split_point, orig_end;
	struct sk_msg *msg_pl, *msg_en;
	struct aead_request *req;
	bool split;
	int rc;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	msg_en = &rec->msg_encrypted;

	split_point = msg_pl->apply_bytes;
	split = split_point && split_point < msg_pl->sg.size;
	if (unlikely((!split &&
		      msg_pl->sg.size +
		      prot->overhead_size > msg_en->sg.size) ||
		     (split &&
		      split_point +
		      prot->overhead_size > msg_en->sg.size))) {
		split = true;
		split_point = msg_en->sg.size;
	}
	if (split) {
		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
					   split_point, prot->overhead_size,
					   &orig_end);
		if (rc < 0)
			return rc;
		/* This can happen if above tls_split_open_record allocates
		 * a single large encryption buffer instead of two smaller
		 * ones. In this case adjust pointers and continue without
		 * split.
		 */
		if (!msg_pl->sg.size) {
			tls_merge_open_record(sk, rec, tmp, orig_end);
			msg_pl = &rec->msg_plaintext;
			msg_en = &rec->msg_encrypted;
			split = false;
		}
		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
			    prot->overhead_size);
	}

	rec->tx_flags = flags;
	req = &rec->aead_req;

	i = msg_pl->sg.end;
	sk_msg_iter_var_prev(i);

	rec->content_type = record_type;
	if (prot->version == TLS_1_3_VERSION) {
		/* Add content type to end of message. No padding added */
		sg_set_buf(&rec->sg_content_type, &rec->content_type, 1);
		sg_mark_end(&rec->sg_content_type);
		sg_chain(msg_pl->sg.data, msg_pl->sg.end + 1,
			 &rec->sg_content_type);
	} else {
		sg_mark_end(sk_msg_elem(msg_pl, i));
	}

	if (msg_pl->sg.end < msg_pl->sg.start) {
		sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
			 MAX_SKB_FRAGS - msg_pl->sg.start + 1,
			 msg_pl->sg.data);
	}

	i = msg_pl->sg.start;
	sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);

	i = msg_en->sg.end;
	sk_msg_iter_var_prev(i);
	sg_mark_end(sk_msg_elem(msg_en, i));

	i = msg_en->sg.start;
	sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);

	tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
		     tls_ctx->tx.rec_seq, record_type, prot);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&msg_en->sg.data[i])) +
			 msg_en->sg.data[i].offset,
			 msg_pl->sg.size + prot->tail_size,
			 record_type);

	tls_ctx->pending_open_record_frags = false;

	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
			       msg_pl->sg.size + prot->tail_size, i);
	if (rc < 0) {
		if (rc != -EINPROGRESS) {
			tls_err_abort(sk, -EBADMSG);
			if (split) {
				tls_ctx->pending_open_record_frags = true;
				tls_merge_open_record(sk, rec, tmp, orig_end);
			}
		}
		ctx->async_capable = 1;
		return rc;
	} else if (split) {
		msg_pl = &tmp->msg_plaintext;
		msg_en = &tmp->msg_encrypted;
		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
		tls_ctx->pending_open_record_frags = true;
		ctx->open_rec = tmp;
	}

	return tls_tx_records(sk, flags);
}
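/* Run the record through the attached BPF msg verdict program (if any) and
 * either transmit it on this socket, redirect it to another socket, or drop
 * it, adjusting the caller's copied count accordingly.
 */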
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
			       bool full_record, u8 record_type,
			       ssize_t *copied, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct sk_msg msg_redir = { };
	struct sk_psock *psock;
	struct sock *sk_redir;
	struct tls_rec *rec;
	bool enospc, policy, redir_ingress;
	int err = 0, send;
	u32 delta = 0;

	policy = !(flags & MSG_SENDPAGE_NOPOLICY);
	psock = sk_psock_get(sk);
	if (!psock || !policy) {
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
		}
		if (psock)
			sk_psock_put(sk, psock);
		return err;
	}
more_data:
	enospc = sk_msg_full(msg);
	if (psock->eval == __SK_NONE) {
		delta = msg->sg.size;
		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
		delta -= msg->sg.size;
	}
	if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
	    !enospc && !full_record) {
		err = -ENOSPC;
		goto out_err;
	}
	send = msg->sg.size;
	if (msg->apply_bytes && msg->apply_bytes < send)
		send = msg->apply_bytes;

	switch (psock->eval) {
	case __SK_PASS:
		err = tls_push_record(sk, flags, record_type);
		if (err && err != -EINPROGRESS && sk->sk_err == EBADMSG) {
			*copied -= sk_msg_free(sk, msg);
			tls_free_open_rec(sk);
			err = -sk->sk_err;
			goto out_err;
		}
		break;
	case __SK_REDIRECT:
		redir_ingress = psock->redir_ingress;
		sk_redir = psock->sk_redir;
		memcpy(&msg_redir, msg, sizeof(*msg));
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		sk_msg_return_zero(sk, msg, send);
		msg->sg.size -= send;
		release_sock(sk);
		err = tcp_bpf_sendmsg_redir(sk_redir, redir_ingress,
					    &msg_redir, send, flags);
		lock_sock(sk);
		if (err < 0) {
			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
			msg->sg.size = 0;
		}
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		break;
	case __SK_DROP:
	default:
		sk_msg_free_partial(sk, msg, send);
		if (msg->apply_bytes < send)
			msg->apply_bytes = 0;
		else
			msg->apply_bytes -= send;
		if (msg->sg.size == 0)
			tls_free_open_rec(sk);
		*copied -= (send + delta);
		err = -EACCES;
	}

	if (likely(!err)) {
		bool reset_eval = !ctx->open_rec;

		rec = ctx->open_rec;
		if (rec) {
			msg = &rec->msg_plaintext;
			if (!msg->apply_bytes)
				reset_eval = true;
		}
		if (reset_eval) {
			psock->eval = __SK_NONE;
			if (psock->sk_redir) {
				sock_put(psock->sk_redir);
				psock->sk_redir = NULL;
			}
		}
		if (rec)
			goto more_data;
	}
out_err:
	sk_psock_put(sk, psock);
	return err;
}
static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec = ctx->open_rec;
	struct sk_msg *msg_pl;
	ssize_t copied;

	if (!rec)
		return 0;

	msg_pl = &rec->msg_plaintext;
	copied = msg_pl->sg.size;
	if (!copied)
		return 0;

	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
				   &copied, flags);
}
static int tls_sw_sendmsg_splice(struct sock *sk, struct msghdr *msg,
				 struct sk_msg *msg_pl, size_t try_to_copy,
				 ssize_t *copied)
{
	struct page *page = NULL, **pages = &page;

	do {
		ssize_t part;
		size_t off;

		part = iov_iter_extract_pages(&msg->msg_iter, &pages,
					      try_to_copy, 1, 0, &off);
		if (part <= 0)
			return part ?: -EIO;

		if (WARN_ON_ONCE(!sendpage_ok(page))) {
			iov_iter_revert(&msg->msg_iter, part);
			return -EIO;
		}

		sk_msg_page_add(msg_pl, page, part, off);
		msg_pl->sg.copybreak = 0;
		msg_pl->sg.curr = msg_pl->sg.end;
		sk_mem_charge(sk, part);
		*copied += part;
		try_to_copy -= part;
	} while (try_to_copy && !sk_msg_full(msg_pl));

	return 0;
}
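/* Core sendmsg path: build plaintext/ciphertext records from the user's
 * iterator, using zero-copy where possible, and push full records (or the
 * last open record when no more data is expected) through the BPF verdict
 * and encryption machinery.
 */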
static int tls_sw_sendmsg_locked(struct sock *sk, struct msghdr *msg,
				 size_t size)
{
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	bool async_capable = ctx->async_capable;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy;
	ssize_t copied = 0;
	struct sk_msg *msg_pl, *msg_en;
	struct tls_rec *rec;
	int required_size;
	int num_async = 0;
	bool full_record;
	int record_room;
	int num_zc = 0;
	int orig_size;
	int ret = 0;

	if (!eor && (msg->msg_flags & MSG_EOR))
		return -EINVAL;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_process_cmsg(sk, msg, &record_type);
		if (ret) {
			if (ret == -EINPROGRESS)
				num_async++;
			else if (ret != -EAGAIN)
				goto send_end;
		}
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		if (ctx->open_rec)
			rec = ctx->open_rec;
		else
			rec = ctx->open_rec = tls_get_rec(sk);
		if (!rec) {
			ret = -ENOMEM;
			goto send_end;
		}

		msg_pl = &rec->msg_plaintext;
		msg_en = &rec->msg_encrypted;

		orig_size = msg_pl->sg.size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - msg_pl->sg.size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = msg_pl->sg.size + try_to_copy +
				prot->overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;

alloc_encrypted:
		ret = tls_alloc_encrypted_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_en->sg.size;
			full_record = true;
		}

		if (try_to_copy && (msg->msg_flags & MSG_SPLICE_PAGES)) {
			ret = tls_sw_sendmsg_splice(sk, msg, msg_pl,
						    try_to_copy, &copied);
			if (ret < 0)
				goto send_end;
			tls_ctx->pending_open_record_frags = true;

			if (sk_msg_full(msg_pl))
				full_record = true;

			if (full_record || eor)
				goto copied;
			continue;
		}

		if (!is_kvec && (full_record || eor) && !async_capable) {
			u32 first = msg_pl->sg.end;

			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
							msg_pl, try_to_copy);
			if (ret)
				goto fallback_to_reg_send;

			num_zc++;
			copied += try_to_copy;

			sk_msg_sg_copy_set(msg_pl, first);
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ctx->open_rec && ret == -ENOSPC)
					goto rollback_iter;
				else if (ret != -EAGAIN)
					goto send_end;
			}
			continue;
rollback_iter:
			copied -= try_to_copy;
			sk_msg_sg_copy_clear(msg_pl, first);
			iov_iter_revert(&msg->msg_iter,
					msg_pl->sg.size - orig_size);
fallback_to_reg_send:
			sk_msg_trim(sk, msg_pl, orig_size);
		}

		required_size = msg_pl->sg.size + try_to_copy;

		ret = tls_clone_plaintext_msg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto send_end;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - msg_pl->sg.size;
			full_record = true;
			sk_msg_trim(sk, msg_en,
				    msg_pl->sg.size + prot->overhead_size);
		}

		if (try_to_copy) {
			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
						       msg_pl, try_to_copy);
			if (ret < 0)
				goto trim_sgl;
		}

		/* Open records defined only if successfully copied, otherwise
		 * we would trim the sg but not reset the open record frags.
		 */
		tls_ctx->pending_open_record_frags = true;
		copied += try_to_copy;
copied:
		if (full_record || eor) {
			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
						  record_type, &copied,
						  msg->msg_flags);
			if (ret) {
				if (ret == -EINPROGRESS)
					num_async++;
				else if (ret == -ENOMEM)
					goto wait_for_memory;
				else if (ret != -EAGAIN) {
					if (ret == -ENOSPC)
						ret = 0;
					goto send_end;
				}
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			tls_trim_both_msgs(sk, orig_size);
			goto send_end;
		}

		if (ctx->open_rec && msg_en->sg.size < required_size)
			goto alloc_encrypted;
	}

	if (!num_async) {
		goto send_end;
	} else if (num_zc || eor) {
		int err;

		/* Wait for pending encryptions to get completed */
		err = tls_encrypt_async_wait(ctx);
		if (err) {
			ret = err;
			copied = 0;
		}
	}

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, msg->msg_flags);
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);
	return copied > 0 ? copied : ret;
}
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	int ret;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT | MSG_SPLICE_PAGES | MSG_EOR |
			       MSG_SENDPAGE_NOPOLICY))
		return -EOPNOTSUPP;

	ret = mutex_lock_interruptible(&tls_ctx->tx_lock);
	if (ret)
		return ret;
	lock_sock(sk);
	ret = tls_sw_sendmsg_locked(sk, msg, size);
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
	return ret;
}
/*
 * Handle unexpected EOF during splice without SPLICE_F_MORE set.
 */
void tls_sw_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec;
	struct sk_msg *msg_pl;
	ssize_t copied = 0;
	bool retrying = false;
	int ret = 0;

	if (!ctx->open_rec)
		return;

	mutex_lock(&tls_ctx->tx_lock);
	lock_sock(sk);

retry:
	/* same checks as in tls_sw_push_pending_record() */
	rec = ctx->open_rec;
	if (!rec)
		goto unlock;

	msg_pl = &rec->msg_plaintext;
	if (msg_pl->sg.size == 0)
		goto unlock;

	/* Check the BPF advisor and perform transmission. */
	ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA,
				  &copied, 0);
	switch (ret) {
	case 0:
	case -EAGAIN:
		if (retrying)
			break;
		retrying = true;
		goto retry;
	case -EINPROGRESS:
		break;
	default:
		goto unlock;
	}

	/* Wait for pending encryptions to get completed */
	if (tls_encrypt_async_wait(ctx))
		goto unlock;

	/* Transmit if any encryptions have completed */
	if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		cancel_delayed_work(&ctx->tx_work.work);
		tls_tx_records(sk, 0);
	}

unlock:
	release_sock(sk);
	mutex_unlock(&tls_ctx->tx_lock);
}
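/* Wait until the stream parser has a full TLS record queued (or the psock
 * ingress queue has data), honouring the socket's receive timeout and
 * shutdown state.
 */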
static int
tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
		bool released)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	long timeo;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (!tls_strp_msg_ready(ctx)) {
		if (!sk_psock_queue_empty(psock))
			return 0;

		if (sk->sk_err)
			return sock_error(sk);

		if (ret < 0)
			return ret;

		if (!skb_queue_empty(&sk->sk_receive_queue)) {
			tls_strp_check_rcv(&ctx->strp);
			if (tls_strp_msg_ready(ctx))
				break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		if (sock_flag(sk, SOCK_DONE))
			return 0;

		if (!timeo)
			return -EAGAIN;

		released = true;
		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		ret = sk_wait_event(sk, &timeo,
				    tls_strp_msg_ready(ctx) ||
				    !sk_psock_queue_empty(psock),
				    &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current))
			return sock_intr_errno(timeo);
	}

	tls_strp_msg_load(&ctx->strp, released);

	return 1;
}
static int tls_setup_from_iter(struct iov_iter *from,
			       int length, int *pages_used,
			       struct scatterlist *to,
			       int to_max_pages)
{
	int rc = 0, i = 0, num_elem = *pages_used, maxpages;
	struct page *pages[MAX_SKB_FRAGS];
	unsigned int size = 0;
	ssize_t copied, use;
	size_t offset;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages2(from, pages,
					     length,
					     maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			/* We do not uncharge memory from this API */

			offset = 0;
			copied -= use;

			i++;
			num_elem++;
		}
	}
	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size);
	*pages_used = num_elem;

	return rc;
}
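/* Allocate a page-backed skb large enough to hold the decrypted (cleartext)
 * copy of the input record and mirror its stream-parser metadata.
 */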
static struct sk_buff *
tls_alloc_clrtxt_skb(struct sock *sk, struct sk_buff *skb,
		     unsigned int full_len)
{
	struct strp_msg *clr_rxm;
	struct sk_buff *clr_skb;
	int err;

	clr_skb = alloc_skb_with_frags(0, full_len, TLS_PAGE_ORDER,
				       &err, sk->sk_allocation);
	if (!clr_skb)
		return NULL;

	skb_copy_header(clr_skb, skb);
	clr_skb->len = full_len;
	clr_skb->data_len = full_len;

	clr_rxm = strp_msg(clr_skb);
	clr_rxm->offset = 0;

	return clr_skb;
}
/*
 * tls_decrypt_sw() and tls_decrypt_device() are decrypt handlers.
 * They must transform the darg in/out argument as follows:
 *       | Input (darg->inargs)      | Output
 * -------------------------------------------------------------------
 * zc    | Zero-copy decrypt allowed | Zero-copy performed
 * async | Async decrypt allowed     | Async crypto used / in progress
 * skb   |            *              | Output skb
 *
 * If ZC decryption was performed darg.skb will point to the input skb.
 */

/* This function decrypts the input skb into either out_iov or in out_sg
 * or in skb buffers itself. The input parameter 'darg->zc' indicates if
 * zero-copy mode needs to be tried or not. With zero-copy mode, either
 * out_iov or out_sg must be non-NULL. In case both out_iov and out_sg are
 * NULL, then the decryption happens inside skb buffers itself, i.e.
 * zero-copy gets disabled and 'darg->zc' is updated.
 */
static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
			  struct scatterlist *out_sg,
			  struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	int n_sgin, n_sgout, aead_size, err, pages = 0;
	struct sk_buff *skb = tls_strp_msg(ctx);
	const struct strp_msg *rxm = strp_msg(skb);
	const struct tls_msg *tlm = tls_msg(skb);
	struct aead_request *aead_req;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - prot->overhead_size;
	int tail_pages = !!prot->tail_size;
	struct tls_decrypt_ctx *dctx;
	struct sk_buff *clear_skb;
	int iv_offset = 0;
	u8 *mem;

	n_sgin = skb_nsg(skb, rxm->offset + prot->prepend_size,
			 rxm->full_len - prot->prepend_size);
	if (n_sgin < 1)
		return n_sgin ?: -EBADMSG;

	if (darg->zc && (out_iov || out_sg)) {
		clear_skb = NULL;

		if (out_iov)
			n_sgout = 1 + tail_pages +
				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
		else
			n_sgout = sg_nents(out_sg);
	} else {
		darg->zc = false;

		clear_skb = tls_alloc_clrtxt_skb(sk, skb, rxm->full_len);
		if (!clear_skb)
			return -ENOMEM;

		n_sgout = 1 + skb_shinfo(clear_skb)->nr_frags;
	}

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	/* Allocate a single block of memory which contains
	 * aead_req || tls_decrypt_ctx.
	 * Both structs are variable length.
	 */
	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	aead_size = ALIGN(aead_size, __alignof__(*dctx));
	mem = kmalloc(aead_size + struct_size(dctx, sg, size_add(n_sgin, n_sgout)),
		      sk->sk_allocation);
	if (!mem) {
		err = -ENOMEM;
		goto exit_free_skb;
	}

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	dctx = (struct tls_decrypt_ctx *)(mem + aead_size);
	dctx->sk = sk;
	sgin = &dctx->sg[0];
	sgout = &dctx->sg[n_sgin];

	/* For CCM based ciphers, first byte of nonce+iv is a constant */
	switch (prot->cipher_type) {
	case TLS_CIPHER_AES_CCM_128:
		dctx->iv[0] = TLS_AES_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	case TLS_CIPHER_SM4_CCM:
		dctx->iv[0] = TLS_SM4_CCM_IV_B0_BYTE;
		iv_offset = 1;
		break;
	}

	/* Prepare IV */
	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv,
		       prot->iv_size + prot->salt_size);
	} else {
		err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
				    &dctx->iv[iv_offset] + prot->salt_size,
				    prot->iv_size);
		if (err < 0)
			goto exit_free;
		memcpy(&dctx->iv[iv_offset], tls_ctx->rx.iv, prot->salt_size);
	}
	tls_xor_iv_with_seq(prot, &dctx->iv[iv_offset], tls_ctx->rx.rec_seq);

	/* Prepare AAD */
	tls_make_aad(dctx->aad, rxm->full_len - prot->overhead_size +
		     prot->tail_size,
		     tls_ctx->rx.rec_seq, tlm->control, prot);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], dctx->aad, prot->aad_size);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + prot->prepend_size,
			   rxm->full_len - prot->prepend_size);
	if (err < 0)
		goto exit_free;

	if (clear_skb) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = skb_to_sgvec(clear_skb, &sgout[1], prot->prepend_size,
				   data_len + prot->tail_size);
		if (err < 0)
			goto exit_free;
	} else if (out_iov) {
		sg_init_table(sgout, n_sgout);
		sg_set_buf(&sgout[0], dctx->aad, prot->aad_size);

		err = tls_setup_from_iter(out_iov, data_len, &pages, &sgout[1],
					  (n_sgout - 1 - tail_pages));
		if (err < 0)
			goto exit_free_pages;

		if (prot->tail_size) {
			sg_unmark_end(&sgout[pages]);
			sg_set_buf(&sgout[pages + 1], &dctx->tail,
				   prot->tail_size);
			sg_mark_end(&sgout[pages + 1]);
		}
	} else if (out_sg) {
		memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
	}
	dctx->free_sgout = !!pages;

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
				data_len + prot->tail_size, aead_req, darg);
	if (err) {
		if (darg->async_done)
			goto exit_free_skb;
		goto exit_free_pages;
	}

	darg->skb = clear_skb ?: tls_strp_msg(ctx);
	clear_skb = NULL;

	if (unlikely(darg->async)) {
		err = tls_strp_msg_hold(&ctx->strp, &ctx->async_hold);
		if (err)
			__skb_queue_tail(&ctx->async_hold, darg->skb);
		return err;
	}

	if (unlikely(darg->async_done))
		return 0;

	if (prot->tail_size)
		darg->tail = dctx->tail;

exit_free_pages:
	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));
exit_free:
	kfree(mem);
exit_free_skb:
	consume_skb(clear_skb);
	return err;
}
static int
tls_decrypt_sw(struct sock *sk, struct tls_context *tls_ctx,
	       struct msghdr *msg, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	err = tls_decrypt_sg(sk, &msg->msg_iter, NULL, darg);
	if (err < 0) {
		if (err == -EBADMSG)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTERROR);
		return err;
	}
	/* keep going even for ->async, the code below is TLS 1.3 */

	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
		     darg->tail != TLS_RECORD_TYPE_DATA)) {
		darg->zc = false;
		if (!darg->tail)
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXNOPADVIOL);
		TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSDECRYPTRETRY);
		return tls_decrypt_sw(sk, tls_ctx, msg, darg);
	}

	pad = tls_padding_length(prot, darg->skb, darg);
	if (pad < 0) {
		if (darg->skb != tls_strp_msg(ctx))
			consume_skb(darg->skb);
		return pad;
	}

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	return 0;
}
static int
tls_decrypt_device(struct sock *sk, struct msghdr *msg,
		   struct tls_context *tls_ctx, struct tls_decrypt_arg *darg)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int pad, err;

	if (tls_ctx->rx_conf != TLS_HW)
		return 0;

	err = tls_device_decrypted(sk, tls_ctx);
	if (err <= 0)
		return err;

	pad = tls_padding_length(prot, tls_strp_msg(ctx), darg);
	if (pad < 0)
		return pad;

	darg->async = false;
	darg->skb = tls_strp_msg(ctx);
	/* ->zc downgrade check, in case TLS 1.3 gets here */
	darg->zc &= !(prot->version == TLS_1_3_VERSION &&
		      tls_msg(darg->skb)->control != TLS_RECORD_TYPE_DATA);

	rxm = strp_msg(darg->skb);
	rxm->full_len -= pad;

	if (!darg->zc) {
		/* Non-ZC case needs a real skb */
		darg->skb = tls_strp_msg_detach(ctx);
		if (!darg->skb)
			return -ENOMEM;
	} else {
		unsigned int off, len;

		/* In ZC case nobody cares about the output skb.
		 * Just copy the data here. Note the skb is not fully trimmed.
		 */
		off = rxm->offset + prot->prepend_size;
		len = rxm->full_len - prot->overhead_size;

		err = skb_copy_datagram_msg(darg->skb, off, msg, len);
		if (err)
			return err;
	}
	return 1;
}
static int tls_rx_one_record(struct sock *sk, struct msghdr *msg,
			     struct tls_decrypt_arg *darg)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm;
	int err;

	err = tls_decrypt_device(sk, msg, tls_ctx, darg);
	if (!err)
		err = tls_decrypt_sw(sk, tls_ctx, msg, darg);
	if (err < 0)
		return err;

	rxm = strp_msg(darg->skb);
	rxm->offset += prot->prepend_size;
	rxm->full_len -= prot->overhead_size;
	tls_advance_record_sn(sk, prot, &tls_ctx->rx);

	return 0;
}
int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
{
	struct tls_decrypt_arg darg = { .zc = true, };

	return tls_decrypt_sg(sk, NULL, sgout, &darg);
}
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
				   u8 *control)
{
	int err;

	if (!*control) {
		*control = tlm->control;
		if (!*control)
			return -EBADMSG;

		err = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
			       sizeof(*control), control);
		if (*control != TLS_RECORD_TYPE_DATA) {
			if (err || msg->msg_flags & MSG_CTRUNC)
				return -EIO;
		}
	} else if (*control != tlm->control) {
		return 0;
	}

	return 1;
}
static void tls_rx_rec_done(struct tls_sw_context_rx *ctx)
{
	tls_strp_msg_done(&ctx->strp);
}
/* This function traverses the rx_list in tls receive context to copy the
 * decrypted records into the buffer provided by the caller when zero copy is
 * not true. Further, the records are removed from the rx_list if it is not a
 * peek case and the record has been consumed completely.
 */
static int process_rx_list(struct tls_sw_context_rx *ctx,
			   struct msghdr *msg,
			   u8 *control,
			   size_t skip,
			   size_t len,
			   bool is_peek,
			   bool *more)
{
	struct sk_buff *skb = skb_peek(&ctx->rx_list);
	struct tls_msg *tlm;
	ssize_t copied = 0;
	int err;

	while (skip && skb) {
		struct strp_msg *rxm = strp_msg(skb);
		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		if (skip < rxm->full_len)
			break;

		skip = skip - rxm->full_len;
		skb = skb_peek_next(skb, &ctx->rx_list);
	}

	while (len && skb) {
		struct sk_buff *next_skb;
		struct strp_msg *rxm = strp_msg(skb);
		int chunk = min_t(unsigned int, rxm->full_len - skip, len);

		tlm = tls_msg(skb);

		err = tls_record_content_type(msg, tlm, control);
		if (err <= 0)
			goto more;

		err = skb_copy_datagram_msg(skb, rxm->offset + skip,
					    msg, chunk);
		if (err < 0)
			goto more;

		len = len - chunk;
		copied = copied + chunk;

		/* Consume the data from record if it is non-peek case*/
		if (!is_peek) {
			rxm->offset = rxm->offset + chunk;
			rxm->full_len = rxm->full_len - chunk;

			/* Return if there is unconsumed data in the record */
			if (rxm->full_len - skip)
				break;
		}

		/* The remaining skip-bytes must lie in 1st record in rx_list.
		 * So from the 2nd record, 'skip' should be 0.
		 */
		skip = 0;

		if (msg)
			msg->msg_flags |= MSG_EOR;

		next_skb = skb_peek_next(skb, &ctx->rx_list);

		if (!is_peek) {
			__skb_unlink(skb, &ctx->rx_list);
			consume_skb(skb);
		}

		skb = next_skb;
	}
	err = 0;

out:
	return copied ? : err;
more:
	if (more)
		*more = true;
	goto out;
}
static bool
tls_read_flush_backlog(struct sock *sk, struct tls_prot_info *prot,
		       size_t len_left, size_t decrypted, ssize_t done,
		       size_t *flushed_at)
{
	size_t max_rec;

	if (len_left <= decrypted)
		return false;

	max_rec = prot->overhead_size - prot->tail_size + TLS_MAX_PAYLOAD_SIZE;
	if (done - *flushed_at < SZ_128K && tcp_inq(sk) > max_rec)
		return false;

	*flushed_at = done;
	return sk_flush_backlog(sk);
}
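/* The RX path allows only one reader at a time; wait on ctx->wq until the
 * current reader (if any) is done, then mark ourselves as present.
 */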
static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
				 bool nonblock)
{
	long timeo;
	int ret;

	timeo = sock_rcvtimeo(sk, nonblock);

	while (unlikely(ctx->reader_present)) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		ctx->reader_contended = 1;

		add_wait_queue(&ctx->wq, &wait);
		ret = sk_wait_event(sk, &timeo,
				    !READ_ONCE(ctx->reader_present), &wait);
		remove_wait_queue(&ctx->wq, &wait);

		if (timeo <= 0)
			return -EAGAIN;
		if (signal_pending(current))
			return sock_intr_errno(timeo);
		if (ret < 0)
			return ret;
	}

	WRITE_ONCE(ctx->reader_present, 1);

	return 0;
}
static int tls_rx_reader_lock(struct sock *sk, struct tls_sw_context_rx *ctx,
			      bool nonblock)
{
	int err;

	lock_sock(sk);
	err = tls_rx_reader_acquire(sk, ctx, nonblock);
	if (err)
		release_sock(sk);
	return err;
}
static void tls_rx_reader_release(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	if (unlikely(ctx->reader_contended)) {
		if (wq_has_sleeper(&ctx->wq))
			wake_up(&ctx->wq);
		else
			ctx->reader_contended = 0;

		WARN_ON_ONCE(!ctx->reader_present);
	}

	WRITE_ONCE(ctx->reader_present, 0);
}
static void tls_rx_reader_unlock(struct sock *sk, struct tls_sw_context_rx *ctx)
{
	tls_rx_reader_release(sk, ctx);
	release_sock(sk);
}
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	ssize_t decrypted = 0, async_copy_bytes = 0;
	struct sk_psock *psock;
	unsigned char control = 0;
	size_t flushed_at = 0;
	struct strp_msg *rxm;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t peeked = 0;
	bool async = false;
	int target, err;
	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
	bool is_peek = flags & MSG_PEEK;
	bool rx_more = false;
	bool released = true;
	bool bpf_strp_enabled;
	bool zc_capable;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);
	if (err < 0)
		return err;
	psock = sk_psock_get(sk);
	bpf_strp_enabled = sk_psock_strp_enabled(psock);

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto end;

	/* Process pending decrypted records. It must be non-zero-copy */
	err = process_rx_list(ctx, msg, &control, 0, len, is_peek, &rx_more);
	if (err < 0)
		goto end;

	copied = err;
	if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
		goto end;

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	len = len - copied;

	zc_capable = !bpf_strp_enabled && !is_kvec && !is_peek &&
		     ctx->zc_capable;
	while (len && (decrypted + copied < target || tls_strp_msg_ready(ctx))) {
		struct tls_decrypt_arg darg;
		int to_decrypt, chunk;

		err = tls_rx_rec_wait(sk, psock, flags & MSG_DONTWAIT,
				      released);
		if (err <= 0) {
			if (psock) {
				chunk = sk_msg_recvmsg(sk, psock, msg, len,
						       flags);
				if (chunk > 0) {
					decrypted += chunk;
					len -= chunk;
					continue;
				}
			}
			goto recv_end;
		}

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		rxm = strp_msg(tls_strp_msg(ctx));
		tlm = tls_msg(tls_strp_msg(ctx));

		to_decrypt = rxm->full_len - prot->overhead_size;

		if (zc_capable && to_decrypt <= len &&
		    tlm->control == TLS_RECORD_TYPE_DATA)
			darg.zc = true;

		/* Do not use async mode if record is non-data */
		if (tlm->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
			darg.async = ctx->async_capable;
		else
			darg.async = false;

		err = tls_rx_one_record(sk, msg, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto recv_end;
		}

		async |= darg.async;

		/* If the type of records being processed is not known yet,
		 * set it to record type just dequeued. If it is already known,
		 * but does not match the record type just dequeued, go to end.
		 * We always get record type here since for tls1.2, record type
		 * is known just after record is dequeued from stream parser.
		 * For tls1.3, we disable async.
		 */
		err = tls_record_content_type(msg, tls_msg(darg.skb), &control);
		if (err <= 0) {
			DEBUG_NET_WARN_ON_ONCE(darg.zc);
			tls_rx_rec_done(ctx);
put_on_rx_list_err:
			__skb_queue_tail(&ctx->rx_list, darg.skb);
			goto recv_end;
		}

		/* periodically flush backlog, and feed strparser */
		released = tls_read_flush_backlog(sk, prot, len, to_decrypt,
						  decrypted + copied,
						  &flushed_at);

		/* TLS 1.3 may have updated the length by more than overhead */
		rxm = strp_msg(darg.skb);
		chunk = rxm->full_len;
		tls_rx_rec_done(ctx);

		if (!darg.zc) {
			bool partially_consumed = chunk > len;
			struct sk_buff *skb = darg.skb;

			DEBUG_NET_WARN_ON_ONCE(darg.skb == ctx->strp.anchor);

			if (async) {
				/* TLS 1.2-only, to_decrypt must be text len */
				chunk = min_t(int, to_decrypt, len);
				async_copy_bytes += chunk;
put_on_rx_list:
				decrypted += chunk;
				len -= chunk;
				__skb_queue_tail(&ctx->rx_list, skb);
				if (unlikely(control != TLS_RECORD_TYPE_DATA))
					break;
				continue;
			}

			if (bpf_strp_enabled) {
				released = true;
				err = sk_psock_tls_strp_read(psock, skb);
				if (err != __SK_PASS) {
					rxm->offset = rxm->offset + rxm->full_len;
					rxm->full_len = 0;
					if (err == __SK_DROP)
						consume_skb(skb);
					continue;
				}
			}

			if (partially_consumed)
				chunk = len;

			err = skb_copy_datagram_msg(skb, rxm->offset,
						    msg, chunk);
			if (err < 0)
				goto put_on_rx_list_err;

			if (is_peek) {
				peeked += chunk;
				goto put_on_rx_list;
			}

			if (partially_consumed) {
				rxm->offset += chunk;
				rxm->full_len -= chunk;
				goto put_on_rx_list;
			}

			consume_skb(skb);
		}

		decrypted += chunk;
		len -= chunk;

		/* Return full control message to userspace before trying
		 * to parse another message type
		 */
		msg->msg_flags |= MSG_EOR;
		if (control != TLS_RECORD_TYPE_DATA)
			break;
	}

recv_end:
	if (async) {
		int ret;

		/* Wait for all previously submitted records to be decrypted */
		ret = tls_decrypt_async_wait(ctx);
		__skb_queue_purge(&ctx->async_hold);

		if (ret) {
			if (err >= 0 || err == -EINPROGRESS)
				err = ret;
			goto end;
		}

		/* Drain records from the rx_list & copy if required */
		if (is_peek)
			err = process_rx_list(ctx, msg, &control, copied + peeked,
					      decrypted - peeked, is_peek, NULL);
		else
			err = process_rx_list(ctx, msg, &control, 0,
					      async_copy_bytes, is_peek, NULL);

		/* we could have copied less than we wanted, and possibly nothing */
		decrypted += max(err, 0) - async_copy_bytes;
	}

	copied += decrypted;

end:
	tls_rx_reader_unlock(sk, ctx);
	if (psock)
		sk_psock_put(sk, psock);
	return copied ? : err;
}
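/* splice(2) backend: decrypt one record (or reuse an already-decrypted record
 * from rx_list) and feed its payload straight into the pipe; control records
 * are rejected and requeued.
 */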
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct tls_msg *tlm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int chunk;
	int err;

	err = tls_rx_reader_lock(sk, ctx, flags & SPLICE_F_NONBLOCK);
	if (err < 0)
		return err;

	if (!skb_queue_empty(&ctx->rx_list)) {
		skb = __skb_dequeue(&ctx->rx_list);
	} else {
		struct tls_decrypt_arg darg;

		err = tls_rx_rec_wait(sk, NULL, flags & SPLICE_F_NONBLOCK,
				      true);
		if (err <= 0)
			goto splice_read_end;

		memset(&darg.inargs, 0, sizeof(darg.inargs));

		err = tls_rx_one_record(sk, NULL, &darg);
		if (err < 0) {
			tls_err_abort(sk, -EBADMSG);
			goto splice_read_end;
		}

		tls_rx_rec_done(ctx);
		skb = darg.skb;
	}

	rxm = strp_msg(skb);
	tlm = tls_msg(skb);

	/* splice does not support reading control messages */
	if (tlm->control != TLS_RECORD_TYPE_DATA) {
		err = -EINVAL;
		goto splice_requeue;
	}

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_requeue;

	if (chunk < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;
		goto splice_requeue;
	}

	consume_skb(skb);

splice_read_end:
	tls_rx_reader_unlock(sk, ctx);
	return copied ? : err;

splice_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto splice_read_end;
}
int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
		     sk_read_actor_t read_actor)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct strp_msg *rxm = NULL;
	struct sk_buff *skb = NULL;
	struct sk_psock *psock;
	size_t flushed_at = 0;
	bool released = true;
	struct tls_msg *tlm;
	ssize_t copied = 0;
	ssize_t decrypted = 0;
	int err, used;

	psock = sk_psock_get(sk);
	if (psock) {
		sk_psock_put(sk, psock);
		return -EINVAL;
	}
	err = tls_rx_reader_acquire(sk, ctx, true);
	if (err < 0)
		return err;

	/* If crypto failed the connection is broken */
	err = ctx->async_wait.err;
	if (err)
		goto read_sock_end;

	do {
		if (!skb_queue_empty(&ctx->rx_list)) {
			skb = __skb_dequeue(&ctx->rx_list);
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
		} else {
			struct tls_decrypt_arg darg;

			err = tls_rx_rec_wait(sk, NULL, true, released);
			if (err <= 0)
				goto read_sock_end;

			memset(&darg.inargs, 0, sizeof(darg.inargs));

			err = tls_rx_one_record(sk, NULL, &darg);
			if (err < 0) {
				tls_err_abort(sk, -EBADMSG);
				goto read_sock_end;
			}

			released = tls_read_flush_backlog(sk, prot, INT_MAX,
							  0, decrypted,
							  &flushed_at);
			skb = darg.skb;
			rxm = strp_msg(skb);
			tlm = tls_msg(skb);
			decrypted += rxm->full_len;

			tls_rx_rec_done(ctx);
		}

		/* read_sock does not support reading control messages */
		if (tlm->control != TLS_RECORD_TYPE_DATA) {
			err = -EINVAL;
			goto read_sock_requeue;
		}

		used = read_actor(desc, skb, rxm->offset, rxm->full_len);
		if (used <= 0) {
			if (!copied)
				err = used;
			goto read_sock_requeue;
		}
		copied += used;
		if (used < rxm->full_len) {
			rxm->offset += used;
			rxm->full_len -= used;
			if (!desc->count)
				goto read_sock_requeue;
		} else {
			consume_skb(skb);
			if (!desc->count)
				skb = NULL;
		}
	} while (skb);

read_sock_end:
	tls_rx_reader_release(sk, ctx);
	return copied ? : err;

read_sock_requeue:
	__skb_queue_head(&ctx->rx_list, skb);
	goto read_sock_end;
}
bool tls_sw_sock_is_readable(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	bool ingress_empty = true;
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (psock)
		ingress_empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();

	return !ingress_empty || tls_strp_msg_ready(ctx) ||
		!skb_queue_empty(&ctx->rx_list);
}
int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	char header[TLS_HEADER_SIZE + TLS_MAX_IV_SIZE];
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (strp->stm.offset + prot->prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(prot->prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, strp->stm.offset, header, prot->prepend_size);
	if (ret < 0)
		goto read_failure;

	strp->mark = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		cipher_overhead += prot->iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
	    prot->tail_size) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	/* Note that both TLS1.3 and TLS1.2 use TLS_1_2 version here */
	if (header[1] != TLS_1_2_VERSION_MINOR ||
	    header[2] != TLS_1_2_VERSION_MAJOR) {
		ret = -EINVAL;
		goto read_failure;
	}

	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
				     TCP_SKB_CB(skb)->seq + strp->stm.offset);
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}
void tls_rx_msg_ready(struct tls_strparser *strp)
{
	struct tls_sw_context_rx *ctx;

	ctx = container_of(strp, struct tls_sw_context_rx, strp);
	ctx->saved_data_ready(strp->sk);
}
static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_psock *psock;
	gfp_t alloc_save;

	trace_sk_data_ready(sk);

	alloc_save = sk->sk_allocation;
	sk->sk_allocation = GFP_ATOMIC;
	tls_strp_data_ready(&ctx->strp);
	sk->sk_allocation = alloc_save;

	psock = sk_psock_get(sk);
	if (psock) {
		if (!list_empty(&psock->ingress_msg))
			ctx->saved_data_ready(sk);
		sk_psock_put(sk, psock);
	}
}
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
	set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
	cancel_delayed_work_sync(&ctx->tx_work.work);
}
void tls_sw_release_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;

	/* Wait for any pending async encryptions to complete */
	tls_encrypt_async_wait(ctx);

	tls_tx_records(sk, -1);

	/* Free up un-sent records in tx_list. First, free
	 * the partially sent record if any at head of tx_list.
	 */
	if (tls_ctx->partially_sent_record) {
		tls_free_partial_record(sk, tls_ctx);
		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	list_for_each_entry_safe(rec, tmp, &ctx->tx_list, list) {
		list_del(&rec->list);
		sk_msg_free(sk, &rec->msg_encrypted);
		sk_msg_free(sk, &rec->msg_plaintext);
		kfree(rec);
	}

	crypto_free_aead(ctx->aead_send);
	tls_free_open_rec(sk);
}
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	kfree(ctx);
}
void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	if (ctx->aead_recv) {
		__skb_queue_purge(&ctx->rx_list);
		crypto_free_aead(ctx->aead_recv);
		tls_strp_stop(&ctx->strp);
		/* If tls_sw_strparser_arm() was not called (cleanup paths)
		 * we still want to tls_strp_stop(), but sk->sk_data_ready was
		 * never swapped.
		 */
		if (ctx->saved_data_ready) {
			write_lock_bh(&sk->sk_callback_lock);
			sk->sk_data_ready = ctx->saved_data_ready;
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
}
void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_strp_done(&ctx->strp);
}
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(ctx);
}
void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_sw_release_resources_rx(sk);
	tls_sw_free_ctx_rx(tls_ctx);
}
/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx;

	if (unlikely(!tls_ctx))
		return;

	ctx = tls_sw_ctx_tx(tls_ctx);
	if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
		return;

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	if (mutex_trylock(&tls_ctx->tx_lock)) {
		lock_sock(sk);
		tls_tx_records(sk, -1);
		release_sock(sk);
		mutex_unlock(&tls_ctx->tx_lock);
	} else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) {
		/* Someone is holding the tx_lock, they will likely run Tx
		 * and cancel the work on their way out of the lock section.
		 * Schedule a long delay just in case.
		 */
		schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10));
	}
}
static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
{
	struct tls_rec *rec;

	rec = list_first_entry_or_null(&ctx->tx_list, struct tls_rec, list);
	if (!rec)
		return false;

	return READ_ONCE(rec->tx_ready);
}
void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
{
	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);

	/* Schedule the transmission if tx list is ready */
	if (tls_is_tx_ready(tx_ctx) &&
	    !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
		schedule_delayed_work(&tx_ctx->tx_work.work, 0);
}
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	write_lock_bh(&sk->sk_callback_lock);
	rx_ctx->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = tls_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}
void tls_update_rx_zc_capable(struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);

	rx_ctx->zc_capable = tls_ctx->rx_no_pad ||
		tls_ctx->prot_info.version != TLS_1_3_VERSION;
}
static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk)
{
	struct tls_sw_context_tx *sw_ctx_tx;

	if (!ctx->priv_ctx_tx) {
		sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
		if (!sw_ctx_tx)
			return NULL;
	} else {
		sw_ctx_tx = ctx->priv_ctx_tx;
	}

	crypto_init_wait(&sw_ctx_tx->async_wait);
	atomic_set(&sw_ctx_tx->encrypt_pending, 1);
	INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
	INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
	sw_ctx_tx->tx_work.sk = sk;

	return sw_ctx_tx;
}
static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx)
{
	struct tls_sw_context_rx *sw_ctx_rx;

	if (!ctx->priv_ctx_rx) {
		sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
		if (!sw_ctx_rx)
			return NULL;
	} else {
		sw_ctx_rx = ctx->priv_ctx_rx;
	}

	crypto_init_wait(&sw_ctx_rx->async_wait);
	atomic_set(&sw_ctx_rx->decrypt_pending, 1);
	init_waitqueue_head(&sw_ctx_rx->wq);
	skb_queue_head_init(&sw_ctx_rx->rx_list);
	skb_queue_head_init(&sw_ctx_rx->async_hold);

	return sw_ctx_rx;
}
int init_prot_info(struct tls_prot_info *prot,
		   const struct tls_crypto_info *crypto_info,
		   const struct tls_cipher_desc *cipher_desc)
{
	u16 nonce_size = cipher_desc->nonce;

	if (crypto_info->version == TLS_1_3_VERSION) {
		nonce_size = 0;
		prot->aad_size = TLS_HEADER_SIZE;
		prot->tail_size = 1;
	} else {
		prot->aad_size = TLS_AAD_SPACE_SIZE;
		prot->tail_size = 0;
	}

	/* Sanity-check the sizes for stack allocations. */
	if (nonce_size > TLS_MAX_IV_SIZE || prot->aad_size > TLS_MAX_AAD_SIZE)
		return -EINVAL;

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = cipher_desc->tag;
	prot->overhead_size = prot->prepend_size + prot->tag_size + prot->tail_size;
	prot->iv_size = cipher_desc->iv;
	prot->salt_size = cipher_desc->salt;
	prot->rec_seq_size = cipher_desc->rec_seq;

	return 0;
}
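/* Set up software kTLS for one direction of the socket: allocate the TX or
 * RX context, derive the protocol constants from the cipher description,
 * program the AEAD transform with the key/IV material and, for RX, arm the
 * stream parser.
 */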
int tls_set_sw_offload(struct sock *sk, int tx)
{
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	const struct tls_cipher_desc *cipher_desc;
	struct tls_crypto_info *crypto_info;
	char *iv, *rec_seq, *key, *salt;
	struct cipher_context *cctx;
	struct tls_prot_info *prot;
	struct crypto_aead **aead;
	struct tls_context *ctx;
	struct crypto_tfm *tfm;
	int rc = 0;

	ctx = tls_get_ctx(sk);
	prot = &ctx->prot_info;

	if (tx) {
		ctx->priv_ctx_tx = init_ctx_tx(ctx, sk);
		if (!ctx->priv_ctx_tx)
			return -ENOMEM;

		sw_ctx_tx = ctx->priv_ctx_tx;
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		ctx->priv_ctx_rx = init_ctx_rx(ctx);
		if (!ctx->priv_ctx_rx)
			return -ENOMEM;

		sw_ctx_rx = ctx->priv_ctx_rx;
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc) {
		rc = -EINVAL;
		goto free_priv;
	}

	rc = init_prot_info(prot, crypto_info, cipher_desc);
	if (rc)
		goto free_priv;

	iv = crypto_info_iv(crypto_info, cipher_desc);
	key = crypto_info_key(crypto_info, cipher_desc);
	salt = crypto_info_salt(crypto_info, cipher_desc);
	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);

	memcpy(cctx->iv, salt, cipher_desc->salt);
	memcpy(cctx->iv + cipher_desc->salt, iv, cipher_desc->iv);
	memcpy(cctx->rec_seq, rec_seq, cipher_desc->rec_seq);

	if (!*aead) {
		*aead = crypto_alloc_aead(cipher_desc->cipher_name, 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_priv;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, key, cipher_desc->key);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, prot->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);

		tls_update_rx_zc_capable(ctx);
		sw_ctx_rx->async_capable =
			crypto_info->version != TLS_1_3_VERSION &&
			!!(tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC);

		rc = tls_strp_init(&sw_ctx_rx->strp, sk);
		if (rc)
			goto free_aead;
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}