/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/inetdevice.h>
#include <linux/inet_diag.h>

#include <net/snmp.h>
#include <net/tls.h>
#include <net/tls_toe.h>
MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_TCP_ULP("tls");
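/* Usage note (illustrative, not part of the kernel build): a minimal
 * userspace sketch of attaching this ULP to a TCP socket, following the
 * TCP_ULP example in Documentation/networking/tls.rst. "fd" is a
 * placeholder and error handling is elided.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	// connect(fd, ...) must succeed first: the ULP only attaches to
 *	// sockets in TCP_ESTABLISHED state (see tls_init() below).
 *	setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 */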
static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base);
void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

	WRITE_ONCE(sk->sk_prot,
		   &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	ctx->in_tcp_sendpages = true;
	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;
			ctx->in_tcp_sendpages = false;
			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	ctx->in_tcp_sendpages = false;

	return 0;
}
static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}
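/* Usage note (illustrative): a minimal userspace sketch of the
 * control-message interface parsed above, sending a record with a
 * non-default TLS record type (21 == alert), based on the
 * TLS_SET_RECORD_TYPE example in Documentation/networking/tls.rst.
 * "fd", "data" and "len" are placeholders; error handling is elided.
 *
 *	struct msghdr msg = { 0 };
 *	char cbuf[CMSG_SPACE(sizeof(unsigned char))];
 *	struct iovec iov = { .iov_base = data, .iov_len = len };
 *	struct cmsghdr *cmsg;
 *
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	cmsg = CMSG_FIRSTHDR(&msg);
 *	cmsg->cmsg_level = SOL_TLS;
 *	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(unsigned char));
 *	*CMSG_DATA(cmsg) = 21;	// record type: alert
 *	sendmsg(fd, &msg, 0);	// MSG_MORE would be rejected above
 */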
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg;
	u16 offset;

	if (!tls_is_partially_sent_record(ctx))
		return -EINVAL;

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
{
	struct scatterlist *sg;

	for (sg = ctx->partially_sent_record; sg; sg = sg_next(sg)) {
		put_page(sg_page(sg));
		sk_mem_uncharge(sk, sg->length);
	}
	ctx->partially_sent_record = NULL;
}
static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	/* If in_tcp_sendpages is set, call the lower protocol's write_space
	 * handler to ensure we wake up any operations waiting there, for
	 * example if do_tcp_sendpages were to call sk_wait_event.
	 */
	if (ctx->in_tcp_sendpages) {
		ctx->sk_write_space(sk);
		return;
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->tx_conf == TLS_HW)
		tls_device_write_space(sk, ctx);
	else
#endif
		tls_sw_write_space(sk, ctx);

	ctx->sk_write_space(sk);
}
/**
 * tls_ctx_free() - free TLS ULP context
 * @sk:  socket to which @ctx is attached
 * @ctx: TLS context structure
 *
 * Free TLS context. If @sk is %NULL caller guarantees that the socket
 * to which @ctx was attached has no outstanding references.
 */
void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
{
	if (!ctx)
		return;

	memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
	memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
	mutex_destroy(&ctx->tx_lock);

	if (sk)
		kfree_rcu(ctx, rcu);
	else
		kfree(ctx);
}
static void tls_sk_proto_cleanup(struct sock *sk,
				 struct tls_context *ctx, long timeo)
{
	if (unlikely(sk->sk_write_pending) &&
	    !wait_on_pending_writer(sk, &timeo))
		tls_handle_open_record(sk, 0);

	/* We need these for tls_sw_fallback handling of other packets */
	if (ctx->tx_conf == TLS_SW) {
		kfree(ctx->tx.rec_seq);
		kfree(ctx->tx.iv);
		tls_sw_release_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
	} else if (ctx->tx_conf == TLS_HW) {
		tls_device_free_resources_tx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
	}

	if (ctx->rx_conf == TLS_SW) {
		tls_sw_release_resources_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
	} else if (ctx->rx_conf == TLS_HW) {
		tls_device_offload_cleanup_rx(sk);
		TLS_DEC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
	}
}
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	bool free_ctx;

	if (ctx->tx_conf == TLS_SW)
		tls_sw_cancel_work_tx(ctx);

	lock_sock(sk);
	free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;

	if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
		tls_sk_proto_cleanup(sk, ctx, timeo);

	write_lock_bh(&sk->sk_callback_lock);
	if (free_ctx)
		rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
	WRITE_ONCE(sk->sk_prot, ctx->sk_proto);
	if (sk->sk_write_space == tls_write_space)
		sk->sk_write_space = ctx->sk_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);
	if (ctx->tx_conf == TLS_SW)
		tls_sw_free_ctx_tx(ctx);
	if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
		tls_sw_strparser_done(ctx);
	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_ctx_rx(ctx);
	ctx->sk_proto->close(sk, timeout);

	if (free_ctx)
		tls_ctx_free(sk, ctx);
}
static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
				  int __user *optlen, int tx)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	struct cipher_context *cctx;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
	}

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *crypto_info_aes_gcm_128 =
			container_of(crypto_info,
				     struct tls12_crypto_info_aes_gcm_128,
				     info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv,
		       cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *crypto_info_aes_gcm_256 =
			container_of(crypto_info,
				     struct tls12_crypto_info_aes_gcm_256,
				     info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_256->iv,
		       cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}
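/* Usage note (illustrative): a minimal userspace sketch of reading the
 * live TX state back through the getsockopt() path above, e.g. to hand a
 * connection off to another process. Assumes AES-GCM-128 was configured;
 * "fd" is a placeholder and error handling is elided.
 *
 *	struct tls12_crypto_info_aes_gcm_128 crypto_info;
 *	socklen_t len = sizeof(crypto_info);
 *
 *	getsockopt(fd, SOL_TLS, TLS_TX, &crypto_info, &len);
 *	// crypto_info.iv and crypto_info.rec_seq now hold the current IV
 *	// and record sequence number copied out of cctx above.
 */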
static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		rc = do_tls_getsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}
static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}
static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
				  unsigned int optlen, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	int rc = 0;
	int conf;

	if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		crypto_info = &ctx->crypto_send.info;
		alt_crypto_info = &ctx->crypto_recv.info;
	} else {
		crypto_info = &ctx->crypto_recv.info;
		alt_crypto_info = &ctx->crypto_send.info;
	}

	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION &&
	    crypto_info->version != TLS_1_3_VERSION) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* Ensure that TLS version and ciphers are same in both directions */
	if (TLS_CRYPTO_INFO_READY(alt_crypto_info)) {
		if (alt_crypto_info->version != crypto_info->version ||
		    alt_crypto_info->cipher_type != crypto_info->cipher_type) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
		break;
	case TLS_CIPHER_AES_GCM_256:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
		break;
	case TLS_CIPHER_AES_CCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
		break;
	case TLS_CIPHER_CHACHA20_POLY1305:
		optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
		break;
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (optlen != optsize) {
		rc = -EINVAL;
		goto err_crypto_info;
	}

	rc = copy_from_sockptr_offset(crypto_info + 1, optval,
				      sizeof(*crypto_info),
				      optlen - sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto err_crypto_info;
	}

	if (tx) {
		rc = tls_set_device_offload(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, ctx, 1);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSTXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRTXSW);
			conf = TLS_SW;
		}
	} else {
		rc = tls_set_device_offload_rx(sk, ctx);
		conf = TLS_HW;
		if (!rc) {
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICE);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXDEVICE);
		} else {
			rc = tls_set_sw_offload(sk, ctx, 0);
			if (rc)
				goto err_crypto_info;
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXSW);
			TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSCURRRXSW);
			conf = TLS_SW;
		}
		tls_sw_strparser_arm(sk, ctx);
	}

	if (tx)
		ctx->tx_conf = conf;
	else
		ctx->rx_conf = conf;
	update_sk_prot(sk, ctx);
	if (tx) {
		ctx->sk_write_space = sk->sk_write_space;
		sk->sk_write_space = tls_write_space;
	} else {
		sk->sk_socket->ops = &tls_sw_proto_ops;
	}
	goto out;

err_crypto_info:
	memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
	return rc;
}
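/* Usage note (illustrative): a minimal userspace sketch of driving the
 * setsockopt() path above to install TX key material (AES-GCM-128,
 * TLS 1.2), following the TLS_TX example in
 * Documentation/networking/tls.rst. The key/salt/iv/seq values come from
 * the completed handshake and are elided here, as is error handling;
 * "fd" is a placeholder.
 *
 *	struct tls12_crypto_info_aes_gcm_128 ci = { 0 };
 *
 *	ci.info.version = TLS_1_2_VERSION;
 *	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *	// memcpy(ci.key, ..., TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *	// memcpy(ci.salt, ..., TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *	// memcpy(ci.iv, ..., TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *	// memcpy(ci.rec_seq, ..., TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *	setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 */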
static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
			     unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
	case TLS_RX:
		lock_sock(sk);
		rc = do_tls_setsockopt_conf(sk, optval, optlen,
					    optname == TLS_TX);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}
static int tls_setsockopt(struct sock *sk, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level, optname, optval,
						 optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}
struct tls_context *tls_ctx_create(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
	if (!ctx)
		return NULL;

	mutex_init(&ctx->tx_lock);
	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	ctx->sk_proto = READ_ONCE(sk->sk_prot);
	return ctx;
}
static void tls_build_proto(struct sock *sk)
{
	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
	struct proto *prot = READ_ONCE(sk->sk_prot);

	/* Build IPv6 TLS whenever the address of tcpv6_prot changes */
	if (ip_ver == TLSV6 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv6_prot))) {
		mutex_lock(&tcpv6_prot_mutex);
		if (likely(prot != saved_tcpv6_prot)) {
			build_protos(tls_prots[TLSV6], prot);
			smp_store_release(&saved_tcpv6_prot, prot);
		}
		mutex_unlock(&tcpv6_prot_mutex);
	}

	if (ip_ver == TLSV4 &&
	    unlikely(prot != smp_load_acquire(&saved_tcpv4_prot))) {
		mutex_lock(&tcpv4_prot_mutex);
		if (likely(prot != saved_tcpv4_prot)) {
			build_protos(tls_prots[TLSV4], prot);
			smp_store_release(&saved_tcpv4_prot, prot);
		}
		mutex_unlock(&tcpv4_prot_mutex);
	}
}
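/* The tls_prots table filled in below is indexed as prot[tx_conf][rx_conf];
 * each non-TLS_BASE slot overrides only the ops its configuration changes,
 * so e.g. prot[TLS_SW][TLS_BASE] keeps the base recvmsg but swaps in the
 * software-TLS sendmsg/sendpage.
 */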
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
			 const struct proto *base)
{
	prot[TLS_BASE][TLS_BASE] = *base;
	prot[TLS_BASE][TLS_BASE].setsockopt	= tls_setsockopt;
	prot[TLS_BASE][TLS_BASE].getsockopt	= tls_getsockopt;
	prot[TLS_BASE][TLS_BASE].close		= tls_sk_proto_close;

	prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_SW][TLS_BASE].sendmsg		= tls_sw_sendmsg;
	prot[TLS_SW][TLS_BASE].sendpage		= tls_sw_sendpage;

	prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_BASE][TLS_SW].recvmsg		  = tls_sw_recvmsg;
	prot[TLS_BASE][TLS_SW].stream_memory_read = tls_sw_stream_read;
	prot[TLS_BASE][TLS_SW].close		  = tls_sk_proto_close;

	prot[TLS_SW][TLS_SW] = prot[TLS_SW][TLS_BASE];
	prot[TLS_SW][TLS_SW].recvmsg		= tls_sw_recvmsg;
	prot[TLS_SW][TLS_SW].stream_memory_read	= tls_sw_stream_read;
	prot[TLS_SW][TLS_SW].close		= tls_sk_proto_close;

#ifdef CONFIG_TLS_DEVICE
	prot[TLS_HW][TLS_BASE] = prot[TLS_BASE][TLS_BASE];
	prot[TLS_HW][TLS_BASE].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_BASE].sendpage		= tls_device_sendpage;

	prot[TLS_HW][TLS_SW] = prot[TLS_BASE][TLS_SW];
	prot[TLS_HW][TLS_SW].sendmsg		= tls_device_sendmsg;
	prot[TLS_HW][TLS_SW].sendpage		= tls_device_sendpage;

	prot[TLS_BASE][TLS_HW] = prot[TLS_BASE][TLS_SW];

	prot[TLS_SW][TLS_HW] = prot[TLS_SW][TLS_SW];

	prot[TLS_HW][TLS_HW] = prot[TLS_HW][TLS_SW];
#endif
#ifdef CONFIG_TLS_TOE
	prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].hash		= tls_toe_hash;
	prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash	= tls_toe_unhash;
#endif
}
static int tls_init(struct sock *sk)
{
	struct tls_context *ctx;
	int rc = 0;

	tls_build_proto(sk);

#ifdef CONFIG_TLS_TOE
	if (tls_toe_bypass(sk))
		return 0;
#endif

	/* The TLS ulp is currently supported only for TCP sockets
	 * in ESTABLISHED state.
	 * Supporting sockets in LISTEN state will require us
	 * to modify the accept implementation to clone rather than
	 * share the ulp context.
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* allocate tls context */
	write_lock_bh(&sk->sk_callback_lock);
	ctx = tls_ctx_create(sk);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}

	ctx->tx_conf = TLS_BASE;
	ctx->rx_conf = TLS_BASE;
	update_sk_prot(sk, ctx);
out:
	write_unlock_bh(&sk->sk_callback_lock);
	return rc;
}
static void tls_update(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk))
{
	struct tls_context *ctx;

	ctx = tls_get_ctx(sk);
	if (likely(ctx)) {
		ctx->sk_write_space = write_space;
		ctx->sk_proto = p;
	} else {
		/* Pairs with lockless read in sk_clone_lock(). */
		WRITE_ONCE(sk->sk_prot, p);
		sk->sk_write_space = write_space;
	}
}
static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
{
	u16 version, cipher_type;
	struct tls_context *ctx;
	struct nlattr *start;
	int err;

	start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS);
	if (!start)
		return -EMSGSIZE;

	rcu_read_lock();
	ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
	if (!ctx) {
		err = 0;
		goto nla_failure;
	}
	version = ctx->prot_info.version;
	if (version) {
		err = nla_put_u16(skb, TLS_INFO_VERSION, version);
		if (err)
			goto nla_failure;
	}
	cipher_type = ctx->prot_info.cipher_type;
	if (cipher_type) {
		err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type);
		if (err)
			goto nla_failure;
	}
	err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true));
	if (err)
		goto nla_failure;

	err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false));
	if (err)
		goto nla_failure;

	rcu_read_unlock();
	nla_nest_end(skb, start);
	return 0;

nla_failure:
	rcu_read_unlock();
	nla_nest_cancel(skb, start);
	return err;
}
static size_t tls_get_info_size(const struct sock *sk)
{
	size_t size = 0;

	size += nla_total_size(0) +		/* INET_ULP_INFO_TLS */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_VERSION */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_CIPHER */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_RXCONF */
		nla_total_size(sizeof(u16)) +	/* TLS_INFO_TXCONF */
		0;

	return size;
}
static int __net_init tls_init_net(struct net *net)
{
	int err;

	net->mib.tls_statistics = alloc_percpu(struct linux_tls_mib);
	if (!net->mib.tls_statistics)
		return -ENOMEM;

	err = tls_proc_init(net);
	if (err)
		goto err_free_stats;

	return 0;
err_free_stats:
	free_percpu(net->mib.tls_statistics);
	return err;
}
static void __net_exit tls_exit_net(struct net *net)
{
	tls_proc_fini(net);
	free_percpu(net->mib.tls_statistics);
}
static struct pernet_operations tls_proc_ops = {
	.init = tls_init_net,
	.exit = tls_exit_net,
};
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
	.update			= tls_update,
	.get_info		= tls_get_info,
	.get_info_size		= tls_get_info_size,
};
static int __init tls_register(void)
{
	int err;

	err = register_pernet_subsys(&tls_proc_ops);
	if (err)
		return err;

	tls_sw_proto_ops = inet_stream_ops;
	tls_sw_proto_ops.splice_read = tls_sw_splice_read;
	tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked;

	tls_device_init();
	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}
static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
	tls_device_cleanup();
	unregister_pernet_subsys(&tls_proc_ops);
}

module_init(tls_register);
module_exit(tls_unregister);