/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");

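/* Usage sketch (userspace, not compiled here), per
 * Documentation/networking/tls.txt: the "tls" ULP is attached to an
 * established TCP socket via the TCP_ULP socket option, after which the
 * SOL_TLS options implemented below become available:
 *
 *	setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 */
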
enum {
	TLSV4,
	TLSV6,
	TLS_NUM_PROTS,
};

static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static LIST_HEAD(device_list);
static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base);

static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}

	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}

int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

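/* Usage sketch (userspace, based on Documentation/networking/tls.txt):
 * sending a record with a non-default type, e.g. a TLS alert, through the
 * TLS_SET_RECORD_TYPE ancillary message parsed above. data, length and
 * record_type are caller-supplied; error handling omitted.
 *
 *	struct msghdr msg = {0};
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct iovec msg_iov = { .iov_base = data, .iov_len = length };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*CMSG_DATA(cmsg) = record_type;
 *	msg.msg_controllen = cmsg->cmsg_len;
 *
 *	msg.msg_iov = &msg_iov;
 *	msg.msg_iovlen = 1;
 *	sendmsg(sock, &msg, 0);
 */
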
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}

bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
	struct scatterlist *sg;

	sg = ctx->partially_sent_record;
	if (!sg)
		return false;

	while (1) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);

		if (sg_is_last(sg))
			break;
		sg++;
	}

	ctx->partially_sent_record = NULL;
	return true;
}

static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If in_tcp_sendpages is set, call the lower protocol's write
	 * space handler to ensure we wake up any waiting operations there,
	 * e.g. if do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}

static void tls_ctx_free(struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	kfree(ctx);
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);
	bool free_ctx = false;

	lock_sock(sk);
	sk_proto_close = ctx->sk_proto_close;

	if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
		goto skip_tx_cleanup;

	if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
		free_ctx = true;
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_free_resources_tx(sk);
#ifdef CONFIG_TLS_DEVICE
	} else if (ctx->tx_conf == TLS_HW) {
		tls_device_free_resources_tx(sk);
#endif
	}

	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_resources_rx(sk);

#ifdef CONFIG_TLS_DEVICE
	if (ctx->rx_conf == TLS_HW)
		tls_device_offload_cleanup_rx(sk);

	if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
#else
	{
#endif
		tls_ctx_free(ctx);
		ctx = NULL;
	}

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
	/* free ctx for TLS_HW_RECORD, used by tcp_set_state
	 * for sk->sk_prot->unhash [tls_hw_unhash]
	 */
	if (free_ctx)
		tls_ctx_free(ctx);
}

static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send.info;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *
		  crypto_info_aes_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_256->iv,
		       ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}

	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

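/* Usage sketch (userspace): reading back the TX crypto state exposed by
 * do_tls_getsockopt_tx() above, assuming AES-GCM-128 was configured.
 * The kernel fills in the current IV and record sequence number; error
 * handling omitted.
 *
 *	struct tls12_crypto_info_aes_gcm_128 crypto_info;
 *	socklen_t len = sizeof(crypto_info);
 *
 *	getsockopt(sock, SOL_TLS, TLS_TX, &crypto_info, &len);
 */
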
static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
	case TLS_CIPHER_AES_GCM_256: {
		optsize = crypto_info->cipher_type == TLS_CIPHER_AES_GCM_128 ?
			sizeof(struct tls12_crypto_info_aes_gcm_128) :
			sizeof(struct tls12_crypto_info_aes_gcm_256);
		if (optlen != optsize) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1,
				    optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (tx) {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 1);
			conf = TLS_SW;
		}
	} else {
#ifdef CONFIG_TLS_DEVICE
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (rc) {
#else
		{
#endif
			rc = tls_set_sw_offload(sk, ctx, 0);
			conf = TLS_SW;
		}
	}

	if (rc)
		goto err_crypto_info;

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}

	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}

	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}

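/* Usage sketch (userspace, based on Documentation/networking/tls.txt):
 * enabling kTLS transmit once the handshake is done. The key material
 * (iv_write, seq_number_write, cipher_key_write, implicit_iv_write) is
 * assumed to come from the TLS handshake; error handling omitted.
 *
 *	struct tls12_crypto_info_aes_gcm_128 crypto_info;
 *
 *	crypto_info.info.version = TLS_1_2_VERSION;
 *	crypto_info.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *	memcpy(crypto_info.iv, iv_write, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *	memcpy(crypto_info.rec_seq, seq_number_write,
 *	       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *	memcpy(crypto_info.key, cipher_key_write,
 *	       TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *	memcpy(crypto_info.salt, implicit_iv_write,
 *	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *
 *	setsockopt(sock, SOL_TLS, TLS_TX, &crypto_info, sizeof(crypto_info));
 */
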
static struct tls_context *create_ctx(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	icsk->icsk_ulp_data = ctx;
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;
	return ctx;
}

static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], sk->sk_prot);
			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], sk->sk_prot);
			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}

static void tls_hw_sk_destruct(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	ctx->sk_destruct(sk);
	/* Free ctx */
	tls_ctx_free(ctx);
	icsk->icsk_ulp_data = NULL;
}

static int tls_hw_prot(struct sock *sk)
{
	struct tls_context *ctx;
	struct tls_device *dev;
	int rc = 0;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->feature && dev->feature(dev)) {
			ctx = create_ctx(sk);
			if (!ctx)
				goto out;

			spin_unlock_bh(&device_spinlock);
			tls_build_proto(sk);
			ctx->hash = sk->sk_prot->hash;
			ctx->unhash = sk->sk_prot->unhash;
			ctx->sk_proto_close = sk->sk_prot->close;
			ctx->sk_destruct = sk->sk_destruct;
			sk->sk_destruct = tls_hw_sk_destruct;
			ctx->rx_conf = TLS_HW_RECORD;
			ctx->tx_conf = TLS_HW_RECORD;
			update_sk_prot(sk, ctx);
			spin_lock_bh(&device_spinlock);
			rc = 1;
			break;
		}
	}
out:
	spin_unlock_bh(&device_spinlock);
	return rc;
}

static void tls_hw_unhash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;

	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->unhash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			dev->unhash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);
	ctx->unhash(sk);
}

static int tls_hw_hash(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_device *dev;
	int err;

	err = ctx->hash(sk);
	spin_lock_bh(&device_spinlock);
	list_for_each_entry(dev, &device_list, dev_list) {
		if (dev->hash) {
			kref_get(&dev->kref);
			spin_unlock_bh(&device_spinlock);
			err |= dev->hash(dev, sk);
			kref_put(&dev->kref, dev->release);
			spin_lock_bh(&device_spinlock);
		}
	}
	spin_unlock_bh(&device_spinlock);

	if (err)
		tls_hw_unhash(sk);
	return err;
}

static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg = tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close = tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage = tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg = tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage = tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif

	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}

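/* The matrix built above is indexed as prot[tx_conf][rx_conf]: e.g. a
 * socket with software TX and unconfigured RX uses prot[TLS_SW][TLS_BASE],
 * which is the base TCP proto with only sendmsg/sendpage (plus the common
 * setsockopt/getsockopt/close hooks) overridden.
 */
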
static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	if (tls_hw_prot(sk))
		goto out;

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTSUPP;

	/* allocate tls context */
	ctx = create_ctx(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	tls_build_proto(sk);
	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	return rc;
}

void tls_register_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_add_tail(&device->dev_list, &device_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);

void tls_unregister_device(struct tls_device *device)
{
	spin_lock_bh(&device_spinlock);
	list_del(&device->dev_list);
	spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
};

static int __init tls_register(void)
{
	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;

#ifdef CONFIG_TLS_DEVICE
	tls_device_init();
#endif
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
#ifdef CONFIG_TLS_DEVICE
	tls_device_cleanup();
#endif
}

module_init(tls_register);
module_exit(tls_unregister);