/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <crypto/aead.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/tls.h>

/* device_offload_lock is used to synchronize tls_dev_add
 * against NETDEV_DOWN notifications.
 */
static DECLARE_RWSEM(device_offload_lock);

static void tls_device_gc_task(struct work_struct *work);

static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task);
static LIST_HEAD(tls_device_gc_list);
static LIST_HEAD(tls_device_list);
static DEFINE_SPINLOCK(tls_device_lock);

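/* Free the TX/RX offload state attached to a TLS context and the context
 * itself; called once all references to the context are gone.
 */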
static void tls_device_free_ctx(struct tls_context *ctx)
{
	if (ctx->tx_conf == TLS_HW) {
		kfree(tls_offload_ctx_tx(ctx));
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
	}

	if (ctx->rx_conf == TLS_HW)
		kfree(tls_offload_ctx_rx(ctx));

	kfree(ctx);
}

static void tls_device_gc_task(struct work_struct *work)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(gc_list);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_splice_init(&tls_device_gc_list, &gc_list);
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &gc_list, list) {
		struct net_device *netdev = ctx->netdev;

		if (netdev && ctx->tx_conf == TLS_HW) {
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
			dev_put(netdev);
			ctx->netdev = NULL;
		}

		list_del(&ctx->list);
		tls_device_free_ctx(ctx);
	}
}

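/* Defer context teardown to tls_device_gc_task; the tls_dev_del() call and
 * the final free then happen in workqueue context.
 */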
static void tls_device_queue_ctx_destruction(struct tls_context *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&tls_device_lock, flags);
	list_move_tail(&ctx->list, &tls_device_gc_list);

	/* schedule_work inside the spinlock
	 * to make sure tls_device_down waits for that work.
	 */
	schedule_work(&tls_device_gc_work);

	spin_unlock_irqrestore(&tls_device_lock, flags);
}

/* We assume that the socket is already connected */
static struct net_device *get_netdev_for_sock(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *netdev = NULL;

	if (likely(dst)) {
		netdev = dst->dev;
		dev_hold(netdev);
	}

	dst_release(dst);

	return netdev;
}

static void destroy_record(struct tls_record_info *record)
{
	int nr_frags = record->num_frags;
	skb_frag_t *frag;

	while (nr_frags-- > 0) {
		frag = &record->frags[nr_frags];
		__skb_frag_unref(frag);
	}
	kfree(record);
}

static void delete_all_records(struct tls_offload_context_tx *offload_ctx)
{
	struct tls_record_info *info, *temp;

	list_for_each_entry_safe(info, temp, &offload_ctx->records_list, list) {
		list_del(&info->list);
		destroy_record(info);
	}

	offload_ctx->retransmit_hint = NULL;
}

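/* TCP ACK hook (installed via clean_acked_data_enable()): drop every TX
 * record whose end sequence number has been acknowledged and advance
 * unacked_record_sn by the number of records freed.
 */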
static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_record_info *info, *temp;
	struct tls_offload_context_tx *ctx;
	u64 deleted_records = 0;
	unsigned long flags;

	if (!tls_ctx)
		return;

	ctx = tls_offload_ctx_tx(tls_ctx);

	spin_lock_irqsave(&ctx->lock, flags);
	info = ctx->retransmit_hint;
	if (info && !before(acked_seq, info->end_seq)) {
		ctx->retransmit_hint = NULL;
		list_del(&info->list);
		destroy_record(info);
		deleted_records++;
	}

	list_for_each_entry_safe(info, temp, &ctx->records_list, list) {
		if (before(acked_seq, info->end_seq))
			break;
		list_del(&info->list);

		destroy_record(info);
		deleted_records++;
	}

	ctx->unacked_record_sn += deleted_records;
	spin_unlock_irqrestore(&ctx->lock, flags);
}

/* At this point, there should be no references on this
 * socket and no in-flight SKBs associated with this
 * socket, so it is safe to free all the resources.
 */
static void tls_device_sk_destruct(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);

	tls_ctx->sk_destruct(sk);

	if (tls_ctx->tx_conf == TLS_HW) {
		if (ctx->open_record)
			destroy_record(ctx->open_record);
		delete_all_records(ctx);
		crypto_free_aead(ctx->aead_send);
		clean_acked_data_disable(inet_csk(sk));
	}

	if (refcount_dec_and_test(&tls_ctx->refcount))
		tls_device_queue_ctx_destruction(tls_ctx);
}

void tls_device_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);

	tls_free_partial_record(sk, tls_ctx);
}

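/* Append @size bytes taken from @pfrag to the open record, merging into the
 * last fragment when the new data is contiguous within the same page.
 */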
static void tls_append_frag(struct tls_record_info *record,
			    struct page_frag *pfrag,
			    int size)
{
	skb_frag_t *frag;

	frag = &record->frags[record->num_frags - 1];
	if (frag->page.p == pfrag->page &&
	    frag->page_offset + frag->size == pfrag->offset) {
		frag->size += size;
	} else {
		++frag;
		frag->page.p = pfrag->page;
		frag->page_offset = pfrag->offset;
		frag->size = size;
		++record->num_frags;
		get_page(pfrag->page);
	}

	pfrag->offset += size;
	record->len += size;
}

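/* Close the open record: write the TLS prepend into the first fragment,
 * append a dummy tag fragment (the device computes the real tag), queue the
 * record for retransmission lookups and hand the data to TCP through
 * tls_push_sg().
 */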
static int tls_push_record(struct sock *sk,
			   struct tls_context *ctx,
			   struct tls_offload_context_tx *offload_ctx,
			   struct tls_record_info *record,
			   struct page_frag *pfrag,
			   int flags,
			   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	struct tcp_sock *tp = tcp_sk(sk);
	struct page_frag dummy_tag_frag;
	skb_frag_t *frag;
	int i;

	frag = &record->frags[0];
	tls_fill_prepend(ctx,
			 skb_frag_address(frag),
			 record->len - prot->prepend_size,
			 record_type,
			 ctx->crypto_send.info.version);

	/* HW doesn't care about the data in the tag, because it fills it. */
	dummy_tag_frag.page = skb_frag_page(frag);
	dummy_tag_frag.offset = 0;

	tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
	record->end_seq = tp->write_seq + record->len;
	spin_lock_irq(&offload_ctx->lock);
	list_add_tail(&record->list, &offload_ctx->records_list);
	spin_unlock_irq(&offload_ctx->lock);
	offload_ctx->open_record = NULL;
	tls_advance_record_sn(sk, &ctx->tx, ctx->crypto_send.info.version);

	for (i = 0; i < record->num_frags; i++) {
		frag = &record->frags[i];
		sg_unmark_end(&offload_ctx->sg_tx_data[i]);
		sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
			    frag->size, frag->page_offset);
		sk_mem_charge(sk, frag->size);
		get_page(skb_frag_page(frag));
	}
	sg_mark_end(&offload_ctx->sg_tx_data[record->num_frags - 1]);

	/* all ready, send */
	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
}

static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
				 struct page_frag *pfrag,
				 size_t prepend_size)
{
	struct tls_record_info *record;
	skb_frag_t *frag;

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record)
		return -ENOMEM;

	frag = &record->frags[0];
	__skb_frag_set_page(frag, pfrag->page);
	frag->page_offset = pfrag->offset;
	skb_frag_size_set(frag, prepend_size);

	get_page(pfrag->page);
	pfrag->offset += prepend_size;

	record->num_frags = 1;
	record->len = prepend_size;
	offload_ctx->open_record = record;
	return 0;
}

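/* Make sure there is an open record with room reserved for the TLS prepend
 * and that the socket's page_frag has space left for payload.
 */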
static int tls_do_allocation(struct sock *sk,
			     struct tls_offload_context_tx *offload_ctx,
			     struct page_frag *pfrag,
			     size_t prepend_size)
{
	int ret;

	if (!offload_ctx->open_record) {
		if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
						   sk->sk_allocation))) {
			sk->sk_prot->enter_memory_pressure(sk);
			sk_stream_moderate_sndbuf(sk);
			return -ENOMEM;
		}

		ret = tls_create_new_record(offload_ctx, pfrag, prepend_size);
		if (ret)
			return ret;

		if (pfrag->size > pfrag->offset)
			return 0;
	}

	if (!sk_page_frag_refill(sk, pfrag))
		return -ENOMEM;

	return 0;
}

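/* Main TX path: copy data from @msg_iter into the open record and hand
 * complete records to tls_push_record(). A record is closed once it reaches
 * max_open_record_len or runs out of fragment slots; control records are
 * never sent partially.
 */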
static int tls_push_data(struct sock *sk,
			 struct iov_iter *msg_iter,
			 size_t size, int flags,
			 unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
	int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
	struct tls_record_info *record = ctx->open_record;
	struct page_frag *pfrag;
	size_t orig_size = size;
	u32 max_open_record_len;
	int copy, rc = 0;
	bool done = false;
	long timeo;

	if (flags &
	    ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	if (sk->sk_err)
		return -sk->sk_err;

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if (tls_is_partially_sent_record(tls_ctx)) {
		rc = tls_push_partial_record(sk, tls_ctx, flags);
		if (rc < 0)
			return rc;
	}

	pfrag = sk_page_frag(sk);

	/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
	 * we need to leave room for an authentication tag.
	 */
	max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
			      prot->prepend_size;
	do {
		rc = tls_do_allocation(sk, ctx, pfrag,
				       prot->prepend_size);
		if (rc) {
			rc = sk_stream_wait_memory(sk, &timeo);
			if (!rc)
				continue;

			record = ctx->open_record;
			if (!record)
				break;
handle_error:
			if (record_type != TLS_RECORD_TYPE_DATA) {
				/* avoid sending partial
				 * record with type !=
				 * application_data
				 */
				size = orig_size;
				destroy_record(record);
				ctx->open_record = NULL;
			} else if (record->len > prot->prepend_size) {
				goto last_record;
			}

			break;
		}

		record = ctx->open_record;
		copy = min_t(size_t, size, (pfrag->size - pfrag->offset));
		copy = min_t(size_t, copy, (max_open_record_len - record->len));

		if (copy_from_iter_nocache(page_address(pfrag->page) +
					       pfrag->offset,
					   copy, msg_iter) != copy) {
			rc = -EFAULT;
			goto handle_error;
		}
		tls_append_frag(record, pfrag, copy);

		size -= copy;
		if (!size) {
last_record:
			tls_push_record_flags = flags;
			if (more) {
				tls_ctx->pending_open_record_frags =
						record->num_frags;
				break;
			}

			done = true;
		}

		if (done || record->len >= max_open_record_len ||
		    (record->num_frags >= MAX_SKB_FRAGS - 1)) {
			rc = tls_push_record(sk,
					     tls_ctx,
					     ctx,
					     record,
					     pfrag,
					     tls_push_record_flags,
					     record_type);
			if (rc < 0)
				break;
		}
	} while (!done);

	if (orig_size - size > 0)
		rc = orig_size - size;

	return rc;
}

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int rc;

	lock_sock(sk);

	if (unlikely(msg->msg_controllen)) {
		rc = tls_proccess_cmsg(sk, msg, &record_type);
		if (rc)
			goto out;
	}

	rc = tls_push_data(sk, &msg->msg_iter, size,
			   msg->msg_flags, record_type);

out:
	release_sock(sk);
	return rc;
}

int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags)
{
	struct iov_iter	msg_iter;
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);

	if (flags & MSG_OOB) {
		rc = -ENOTSUPP;
		goto out;
	}

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
	rc = tls_push_data(sk, &msg_iter, size,
			   flags, TLS_RECORD_TYPE_DATA);
	kunmap(page);

out:
	release_sock(sk);
	return rc;
}

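/* Find the TX record covering TCP sequence number @seq so a retransmitted
 * segment can be re-encrypted; retransmit_hint caches the last match to keep
 * consecutive lookups cheap.
 */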
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
				       u32 seq, u64 *p_record_sn)
{
	u64 record_sn = context->hint_record_sn;
	struct tls_record_info *info;

	info = context->retransmit_hint;
	if (!info ||
	    before(seq, info->end_seq - info->len)) {
		/* if retransmit_hint is irrelevant start
		 * from the beginning of the list
		 */
		info = list_first_entry(&context->records_list,
					struct tls_record_info, list);
		record_sn = context->unacked_record_sn;
	}

	list_for_each_entry_from(info, &context->records_list, list) {
		if (before(seq, info->end_seq)) {
			if (!context->retransmit_hint ||
			    after(info->end_seq,
				  context->retransmit_hint->end_seq)) {
				context->hint_record_sn = record_sn;
				context->retransmit_hint = info;
			}
			*p_record_sn = record_sn;
			return info;
		}
		record_sn++;
	}

	return NULL;
}
EXPORT_SYMBOL(tls_get_record);

static int tls_device_push_pending_record(struct sock *sk, int flags)
{
	struct iov_iter	msg_iter;

	iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
}

void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
{
	if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;

		sk->sk_allocation = GFP_ATOMIC;
		tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL);
		sk->sk_allocation = sk_allocation;
	}
}

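/* RX resync: if the device asked to resynchronize at exactly this TCP
 * sequence number, clear the pending request and report the record sequence
 * number through tls_dev_resync_rx().
 */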
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev = tls_ctx->netdev;
	struct tls_offload_context_rx *rx_ctx;
	u32 is_req_pending;
	s64 resync_req;
	u32 req_seq;

	if (tls_ctx->rx_conf != TLS_HW)
		return;

	rx_ctx = tls_offload_ctx_rx(tls_ctx);
	resync_req = atomic64_read(&rx_ctx->resync_req);
	req_seq = (resync_req >> 32) - ((u32)TLS_HEADER_SIZE - 1);
	is_req_pending = resync_req;

	if (unlikely(is_req_pending) && req_seq == seq &&
	    atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
		netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk,
						      seq + TLS_HEADER_SIZE - 1,
						      rcd_sn);
}

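/* Software fallback for a record the device decrypted only partially:
 * decrypt the whole record into a temporary buffer via decrypt_skb() (the
 * authentication failure is expected and ignored) and copy the plaintext
 * back over the parts of the skb the device had already decrypted.
 */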
static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
{
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
	struct sk_buff *skb_iter, *unused;
	struct scatterlist sg[1];
	char *orig_buf, *buf;

	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE +
			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
	buf = orig_buf;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		err = nsg;
		goto free_buf;
	}

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE +
		   TLS_CIPHER_AES_GCM_128_IV_SIZE);
	skb_copy_bits(skb, offset, buf,
		      TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE);

	/* We are interested only in the decrypted data not the auth */
	err = decrypt_skb(sk, skb, sg);
	if (err != -EBADMSG)
		goto free_buf;
	else
		err = 0;

	data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);

		if (skb->decrypted)
			skb_store_bits(skb, offset, buf, copy);

		offset += copy;
		buf += copy;
	}

	pos = skb_pagelen(skb);
	skb_walk_frags(skb, skb_iter) {
		int frag_pos;

		/* Practically all frags must belong to msg if reencrypt
		 * is needed with current strparser and coalescing logic,
		 * but strparser may "get optimized", so let's be safe.
		 */
		if (pos + skb_iter->len <= offset)
			goto done_with_frag;
		if (pos >= data_len + rxm->offset)
			break;

		frag_pos = offset - pos;
		copy = min_t(int, skb_iter->len - frag_pos,
			     data_len + rxm->offset - offset);

		if (skb_iter->decrypted)
			skb_store_bits(skb_iter, frag_pos, buf, copy);

		offset += copy;
		buf += copy;
done_with_frag:
		pos += skb_iter->len;
	}

free_buf:
	kfree(orig_buf);
	return err;
}

int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *ctx = tls_offload_ctx_rx(tls_ctx);
	int is_decrypted = skb->decrypted;
	int is_encrypted = !is_decrypted;
	struct sk_buff *skb_iter;

	/* Skip if it is already decrypted */
	if (ctx->sw.decrypted)
		return 0;

	/* Check if all the data is decrypted already */
	skb_walk_frags(skb, skb_iter) {
		is_decrypted &= skb_iter->decrypted;
		is_encrypted &= !skb_iter->decrypted;
	}

	ctx->sw.decrypted |= is_decrypted;

	/* Return immediately if the record is either entirely plaintext or
	 * entirely ciphertext. Otherwise handle reencrypt partially decrypted
	 * record.
	 */
	return (is_encrypted || is_decrypted) ? 0 :
		tls_device_reencrypt(sk, skb);
}

static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
			      struct net_device *netdev)
{
	if (sk->sk_destruct != tls_device_sk_destruct) {
		refcount_set(&ctx->refcount, 1);
		dev_hold(netdev);
		ctx->netdev = netdev;
		spin_lock_irq(&tls_device_lock);
		list_add_tail(&ctx->list, &tls_device_list);
		spin_unlock_irq(&tls_device_lock);

		ctx->sk_destruct = sk->sk_destruct;
		sk->sk_destruct = tls_device_sk_destruct;
	}
}

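/* Enable TLS TX offload on a connected socket: set up the offload context
 * and the SW fallback, insert a zero-length start marker record at the
 * current write_seq, and register the flow with the socket's netdev through
 * tls_dev_add().
 */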
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
	struct net_device *netdev;
	char *iv, *rec_seq;
	struct sk_buff *skb;
	int rc = -EINVAL;
	__be64 rcd_sn;

	if (!ctx)
		goto out;

	if (ctx->priv_ctx_tx) {
		rc = -EEXIST;
		goto out;
	}

	start_marker_record = kmalloc(sizeof(*start_marker_record), GFP_KERNEL);
	if (!start_marker_record) {
		rc = -ENOMEM;
		goto out;
	}

	offload_ctx = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_TX, GFP_KERNEL);
	if (!offload_ctx) {
		rc = -ENOMEM;
		goto free_marker_record;
	}

	crypto_info = &ctx->crypto_send.info;
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	default:
		rc = -EINVAL;
		goto free_offload_ctx;
	}

	prot->prepend_size = TLS_HEADER_SIZE + nonce_size;
	prot->tag_size = tag_size;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = iv_size;
	ctx->tx.iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			     GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto free_offload_ctx;
	}

	memcpy(ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);

	prot->rec_seq_size = rec_seq_size;
	ctx->tx.rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
	if (rc)
		goto free_rec_seq;

	/* start at rec_seq - 1 to account for the start marker record */
	memcpy(&rcd_sn, ctx->tx.rec_seq, sizeof(rcd_sn));
	offload_ctx->unacked_record_sn = be64_to_cpu(rcd_sn) - 1;

	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
	start_marker_record->len = 0;
	start_marker_record->num_frags = 0;

	INIT_LIST_HEAD(&offload_ctx->records_list);
	list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
	spin_lock_init(&offload_ctx->lock);
	sg_init_table(offload_ctx->sg_tx_data,
		      ARRAY_SIZE(offload_ctx->sg_tx_data));

	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
	ctx->push_pending_record = tls_device_push_pending_record;

	/* TLS offload is greatly simplified if we don't send
	 * SKBs where only part of the payload needs to be encrypted.
	 * So mark the last skb in the write queue as end of record.
	 */
	skb = tcp_write_queue_tail(sk);
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	ctx->priv_ctx_tx = offload_ctx;
	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
					     &ctx->crypto_send.info,
					     tcp_sk(sk)->write_seq);
	if (rc)
		goto release_netdev;

	tls_device_attach(ctx, sk, netdev);

	/* following this assignment tls_is_sk_tx_device_offloaded
	 * will return true and the context might be accessed
	 * by the netdev's xmit function.
	 */
	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
	dev_put(netdev);
	up_read(&device_offload_lock);
	goto out;

release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	clean_acked_data_disable(inet_csk(sk));
	crypto_free_aead(offload_ctx->aead_send);
free_rec_seq:
	kfree(ctx->tx.rec_seq);
free_iv:
	kfree(ctx->tx.iv);
free_offload_ctx:
	kfree(offload_ctx);
	ctx->priv_ctx_tx = NULL;
free_marker_record:
	kfree(start_marker_record);
out:
	return rc;
}

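/* RX counterpart of tls_set_device_offload(): the SW RX path is kept
 * (tls_set_sw_offload()) so records the device could not decrypt are still
 * handled, then the flow is registered with the netdev.
 */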
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	struct tls_offload_context_rx *context;
	struct net_device *netdev;
	int rc = 0;

	/* We support starting offload on multiple sockets
	 * concurrently, so we only need a read lock here.
	 * This lock must precede get_netdev_for_sock to prevent races between
	 * NETDEV_DOWN and setsockopt.
	 */
	down_read(&device_offload_lock);
	netdev = get_netdev_for_sock(sk);
	if (!netdev) {
		pr_err_ratelimited("%s: netdev not found\n", __func__);
		rc = -EINVAL;
		goto release_lock;
	}

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		rc = -ENOTSUPP;
		goto release_netdev;
	}

	/* Avoid offloading if the device is down
	 * We don't want to offload new flows after
	 * the NETDEV_DOWN event
	 */
	if (!(netdev->flags & IFF_UP)) {
		rc = -EINVAL;
		goto release_netdev;
	}

	context = kzalloc(TLS_OFFLOAD_CONTEXT_SIZE_RX, GFP_KERNEL);
	if (!context) {
		rc = -ENOMEM;
		goto release_netdev;
	}

	ctx->priv_ctx_rx = context;
	rc = tls_set_sw_offload(sk, ctx, 0);
	if (rc)
		goto release_ctx;

	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
					     &ctx->crypto_recv.info,
					     tcp_sk(sk)->copied_seq);
	if (rc)
		goto free_sw_resources;

	tls_device_attach(ctx, sk, netdev);
	goto release_netdev;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_netdev:
	dev_put(netdev);
release_lock:
	up_read(&device_offload_lock);
	return rc;
}

void tls_device_offload_cleanup_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct net_device *netdev;

	down_read(&device_offload_lock);
	netdev = tls_ctx->netdev;
	if (!netdev)
		goto out;

	if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
		pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n",
				   __func__);
		goto out;
	}

	netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
					TLS_OFFLOAD_CTX_DIR_RX);

	if (tls_ctx->tx_conf != TLS_HW) {
		dev_put(netdev);
		tls_ctx->netdev = NULL;
	}
out:
	up_read(&device_offload_lock);
	tls_sw_release_resources_rx(sk);
}

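/* NETDEV_DOWN handler: take the write side of device_offload_lock to block
 * new offloads, then detach every context bound to this netdev and tell the
 * device to release its HW state.
 */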
static int tls_device_down(struct net_device *netdev)
{
	struct tls_context *ctx, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	/* Request a write lock to block new offload attempts */
	down_write(&device_offload_lock);

	spin_lock_irqsave(&tls_device_lock, flags);
	list_for_each_entry_safe(ctx, tmp, &tls_device_list, list) {
		if (ctx->netdev != netdev ||
		    !refcount_inc_not_zero(&ctx->refcount))
			continue;

		list_move(&ctx->list, &list);
	}
	spin_unlock_irqrestore(&tls_device_lock, flags);

	list_for_each_entry_safe(ctx, tmp, &list, list) {
		if (ctx->tx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_TX);
		if (ctx->rx_conf == TLS_HW)
			netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
							TLS_OFFLOAD_CTX_DIR_RX);
		ctx->netdev = NULL;
		dev_put(netdev);
		list_del_init(&ctx->list);

		if (refcount_dec_and_test(&ctx->refcount))
			tls_device_free_ctx(ctx);
	}

	up_write(&device_offload_lock);

	flush_work(&tls_device_gc_work);

	return NOTIFY_DONE;
}

static int tls_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_FEAT_CHANGE:
		if ((dev->features & NETIF_F_HW_TLS_RX) &&
		    !dev->tlsdev_ops->tls_dev_resync_rx)
			return NOTIFY_BAD;

		if (dev->tlsdev_ops &&
		    dev->tlsdev_ops->tls_dev_add &&
		    dev->tlsdev_ops->tls_dev_del)
			return NOTIFY_DONE;
		else
			return NOTIFY_BAD;
	case NETDEV_DOWN:
		return tls_device_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block tls_dev_notifier = {
	.notifier_call	= tls_dev_event,
};

void __init tls_device_init(void)
{
	register_netdevice_notifier(&tls_dev_notifier);
}

void __exit tls_device_cleanup(void)
{
	unregister_netdevice_notifier(&tls_dev_notifier);
	flush_work(&tls_device_gc_work);
	clean_acked_data_flush();
}