/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>

#include <net/tls.h>
MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
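/* Indices into the tls_prots table below: address family first, TX
 * configuration second.  (Assumed local definitions; some trees provide
 * these constants elsewhere, e.g. in <net/tls.h>.)
 */
enum {
        TLSV4,
        TLSV6,
        TLS_NUM_PROTS,
};

enum {
        TLS_BASE_TX,
        TLS_SW_TX,
        TLS_NUM_CONFIG,
};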
static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG];
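/* Switch the socket to the proto ops that match its address family and the
 * current TX configuration.
 */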
static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;

        sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf];
}
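/* Wait until pending writes on the socket have drained, the timeout expires
 * or a signal is received.
 */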
int wait_on_pending_writer(struct sock *sk, long *timeo)
{
        int rc = 0;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(sk_sleep(sk), &wait);
        while (1) {
                if (!*timeo) {
                        rc = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        rc = sock_intr_errno(*timeo);
                        break;
                }

                if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
                        break;
        }

        remove_wait_queue(sk_sleep(sk), &wait);
        return rc;
}
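/* Push an already-encrypted record, described by a scatterlist, into TCP
 * starting at first_offset into the first entry.  On a partial send the
 * remaining scatterlist and offset are stashed in the context so that the
 * transmit can be resumed later.
 */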
int tls_push_sg(struct sock *sk,
                struct tls_context *ctx,
                struct scatterlist *sg,
                u16 first_offset,
                int flags)
{
        int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
        int ret = 0;
        struct page *p;
        size_t size;
        int offset = first_offset;

        size = sg->length - offset;
        offset += sg->offset;

        while (1) {
                if (sg_is_last(sg))
                        sendpage_flags = flags;

                /* is sending application-limited? */
                tcp_rate_check_app_limited(sk);
                p = sg_page(sg);
retry:
                ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

                if (ret != size) {
                        if (ret > 0) {
                                offset += ret;
                                size -= ret;
                                goto retry;
                        }

                        offset -= sg->offset;
                        ctx->partially_sent_offset = offset;
                        ctx->partially_sent_record = (void *)sg;

                        return ret;
                }

                put_page(p);
                sk_mem_uncharge(sk, sg->length);
                sg = sg_next(sg);
                if (!sg)
                        break;

                offset = sg->offset;
                size = sg->length;
        }

        clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);

        return 0;
}
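/* If a record is still open (built but not yet transmitted), push it out
 * before the record type can change or the socket is torn down.
 */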
static int tls_handle_open_record(struct sock *sk, int flags)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (tls_is_pending_open_record(ctx))
                return ctx->push_pending_record(sk, flags);

        return 0;
}
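/* Parse SOL_TLS control messages attached to a sendmsg() call.
 * TLS_SET_RECORD_TYPE selects the record type for the payload that follows
 * and cannot be combined with MSG_MORE.
 */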
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
                      unsigned char *record_type)
{
        struct cmsghdr *cmsg;
        int rc = -EINVAL;

        for_each_cmsghdr(cmsg, msg) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;
                if (cmsg->cmsg_level != SOL_TLS)
                        continue;

                switch (cmsg->cmsg_type) {
                case TLS_SET_RECORD_TYPE:
                        if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
                                return -EINVAL;

                        if (msg->msg_flags & MSG_MORE)
                                return -EINVAL;

                        rc = tls_handle_open_record(sk, msg->msg_flags);
                        if (rc)
                                return rc;

                        *record_type = *(unsigned char *)CMSG_DATA(cmsg);
                        rc = 0;
                        break;
                default:
                        return -EINVAL;
                }
        }

        return rc;
}
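/* Transmit a record that was closed earlier but could not be sent in full,
 * resuming from the stashed scatterlist and offset when one exists.
 */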
int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
                                   int flags, long *timeo)
{
        struct scatterlist *sg;
        u16 offset;

        if (!tls_is_partially_sent_record(ctx))
                return ctx->push_pending_record(sk, flags);

        sg = ctx->partially_sent_record;
        offset = ctx->partially_sent_offset;

        ctx->partially_sent_record = NULL;
        return tls_push_sg(sk, ctx, sg, offset, flags);
}
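/* sk_write_space callback installed on TLS sockets: when write space opens
 * up and a closed record is still pending, try to push it out with atomic
 * allocations before chaining to the original callback.
 */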
static void tls_write_space(struct sock *sk)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
                gfp_t sk_allocation = sk->sk_allocation;
                int rc;
                long timeo = 0;

                sk->sk_allocation = GFP_ATOMIC;
                rc = tls_push_pending_closed_record(sk, ctx,
                                                    MSG_DONTWAIT |
                                                    MSG_NOSIGNAL,
                                                    &timeo);

                sk->sk_allocation = sk_allocation;

                if (rc < 0)
                        return;
        }

        ctx->sk_write_space(sk);
}
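/* close() handler for TLS sockets: flush or drop unsent record data, free
 * the TX resources and the TLS context, then fall through to the original
 * TCP close handler.
 */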
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
        struct tls_context *ctx = tls_get_ctx(sk);
        long timeo = sock_sndtimeo(sk, 0);
        void (*sk_proto_close)(struct sock *sk, long timeout);

        lock_sock(sk);
        sk_proto_close = ctx->sk_proto_close;

        if (ctx->tx_conf == TLS_BASE_TX) {
                kfree(ctx);
                goto skip_tx_cleanup;
        }

        if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
                tls_handle_open_record(sk, 0);

        if (ctx->partially_sent_record) {
                struct scatterlist *sg = ctx->partially_sent_record;

                while (1) {
                        put_page(sg_page(sg));
                        sk_mem_uncharge(sk, sg->length);

                        if (sg_is_last(sg))
                                break;
                        sg++;
                }
        }

        kfree(ctx->rec_seq);
        kfree(ctx->iv);

        if (ctx->tx_conf == TLS_SW_TX)
                tls_sw_free_tx_resources(sk);

skip_tx_cleanup:
        release_sock(sk);
        sk_proto_close(sk, timeout);
}
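/* getsockopt(SOL_TLS, TLS_TX): copy the transmit crypto parameters back to
 * user space, sized according to the configured cipher type.
 */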
static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
                                int __user *optlen)
{
        int rc = 0;
        struct tls_context *ctx = tls_get_ctx(sk);
        struct tls_crypto_info *crypto_info;
        int len;

        if (get_user(len, optlen))
                return -EFAULT;

        if (!optval || (len < sizeof(*crypto_info))) {
                rc = -EINVAL;
                goto out;
        }

        if (!ctx) {
                rc = -EBUSY;
                goto out;
        }

        /* get user crypto info */
        crypto_info = &ctx->crypto_send;

        if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
                rc = -EBUSY;
                goto out;
        }

        if (len == sizeof(*crypto_info)) {
                if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
                        rc = -EFAULT;
                goto out;
        }

        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                struct tls12_crypto_info_aes_gcm_128 *
                  crypto_info_aes_gcm_128 =
                  container_of(crypto_info,
                               struct tls12_crypto_info_aes_gcm_128,
                               info);

                if (len != sizeof(*crypto_info_aes_gcm_128)) {
                        rc = -EINVAL;
                        goto out;
                }
                lock_sock(sk);
                memcpy(crypto_info_aes_gcm_128->iv,
                       ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
                       TLS_CIPHER_AES_GCM_128_IV_SIZE);
                memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->rec_seq,
                       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
                release_sock(sk);
                if (copy_to_user(optval,
                                 crypto_info_aes_gcm_128,
                                 sizeof(*crypto_info_aes_gcm_128)))
                        rc = -EFAULT;
                break;
        }
        default:
                rc = -EINVAL;
        }

out:
        return rc;
}
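/* Dispatch SOL_TLS getsockopt() by option name; only TLS_TX is handled. */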
static int do_tls_getsockopt(struct sock *sk, int optname,
                             char __user *optval, int __user *optlen)
{
        int rc = 0;

        switch (optname) {
        case TLS_TX:
                rc = do_tls_getsockopt_tx(sk, optval, optlen);
                break;
        default:
                rc = -ENOPROTOOPT;
                break;
        }
        return rc;
}
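/* Proto getsockopt hook: anything that is not SOL_TLS goes to the original
 * TCP handler saved in the context.
 */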
static int tls_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->getsockopt(sk, level, optname, optval, optlen);

        return do_tls_getsockopt(sk, optname, optval, optlen);
}
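/* setsockopt(SOL_TLS, TLS_TX): copy the crypto parameters from user space,
 * validate version and cipher, set up the software TX offload and switch
 * the socket to the TLS proto ops.
 */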
static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
                                unsigned int optlen)
{
        struct tls_crypto_info *crypto_info;
        struct tls_context *ctx = tls_get_ctx(sk);
        int rc = 0;
        int tx_conf;

        if (!optval || (optlen < sizeof(*crypto_info))) {
                rc = -EINVAL;
                goto out;
        }

        crypto_info = &ctx->crypto_send;
        /* Currently we don't support setting crypto info more than one time */
        if (TLS_CRYPTO_INFO_READY(crypto_info)) {
                rc = -EBUSY;
                goto out;
        }

        rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto err_crypto_info;
        }

        /* check version */
        if (crypto_info->version != TLS_1_2_VERSION) {
                rc = -ENOTSUPP;
                goto err_crypto_info;
        }

        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128: {
                if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
                        rc = -EINVAL;
                        goto err_crypto_info;
                }
                rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
                                    optlen - sizeof(*crypto_info));
                if (rc) {
                        rc = -EFAULT;
                        goto err_crypto_info;
                }
                break;
        }
        default:
                rc = -EINVAL;
                goto err_crypto_info;
        }

        /* currently SW is the default; ethtool-based selection may come later */
        rc = tls_set_sw_offload(sk, ctx);
        tx_conf = TLS_SW_TX;
        if (rc)
                goto err_crypto_info;

        ctx->tx_conf = tx_conf;
        update_sk_prot(sk, ctx);
        ctx->sk_write_space = sk->sk_write_space;
        sk->sk_write_space = tls_write_space;
        goto out;

err_crypto_info:
        memset(crypto_info, 0, sizeof(*crypto_info));
out:
        return rc;
}
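/* Dispatch SOL_TLS setsockopt() by option name; only TLS_TX is handled. */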
static int do_tls_setsockopt(struct sock *sk, int optname,
                             char __user *optval, unsigned int optlen)
{
        int rc = 0;

        switch (optname) {
        case TLS_TX:
                lock_sock(sk);
                rc = do_tls_setsockopt_tx(sk, optval, optlen);
                release_sock(sk);
                break;
        default:
                rc = -ENOPROTOOPT;
                break;
        }
        return rc;
}
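/* Proto setsockopt hook: anything that is not SOL_TLS goes to the original
 * TCP handler saved in the context.
 */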
static int tls_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        struct tls_context *ctx = tls_get_ctx(sk);

        if (level != SOL_TLS)
                return ctx->setsockopt(sk, level, optname, optval, optlen);

        return do_tls_setsockopt(sk, optname, optval, optlen);
}
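/* Fill one row of the proto table: TLS_BASE_TX only overrides the socket
 * option and close handlers, while TLS_SW_TX additionally replaces sendmsg
 * and sendpage with the software-encrypting variants.
 */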
static void build_protos(struct proto *prot, struct proto *base)
{
        prot[TLS_BASE_TX] = *base;
        prot[TLS_BASE_TX].setsockopt    = tls_setsockopt;
        prot[TLS_BASE_TX].getsockopt    = tls_getsockopt;
        prot[TLS_BASE_TX].close         = tls_sk_proto_close;

        prot[TLS_SW_TX] = prot[TLS_BASE_TX];
        prot[TLS_SW_TX].sendmsg         = tls_sw_sendmsg;
        prot[TLS_SW_TX].sendpage        = tls_sw_sendpage;
}
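/* ULP init hook, run when user space does setsockopt(SOL_TCP, TCP_ULP, "tls"):
 * allocate the TLS context, save the original proto callbacks and switch the
 * socket to the TLS_BASE_TX proto ops.
 */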
static int tls_init(struct sock *sk)
{
        int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tls_context *ctx;
        int rc = 0;

        /* The TLS ulp is currently supported only for TCP sockets
         * in ESTABLISHED state.
         * Supporting sockets in LISTEN state will require us
         * to modify the accept implementation to clone rather than
         * share the ulp context.
         */
        if (sk->sk_state != TCP_ESTABLISHED)
                return -ENOTSUPP;

        /* allocate tls context */
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                rc = -ENOMEM;
                goto out;
        }
        icsk->icsk_ulp_data = ctx;
        ctx->setsockopt = sk->sk_prot->setsockopt;
        ctx->getsockopt = sk->sk_prot->getsockopt;
        ctx->sk_proto_close = sk->sk_prot->close;

        /* Build IPv6 TLS whenever the address of tcpv6_prot changes */
        if (ip_ver == TLSV6 &&
            unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
                mutex_lock(&tcpv6_prot_mutex);
                if (likely(sk->sk_prot != saved_tcpv6_prot)) {
                        build_protos(tls_prots[TLSV6], sk->sk_prot);
                        smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
                }
                mutex_unlock(&tcpv6_prot_mutex);
        }

        ctx->tx_conf = TLS_BASE_TX;
        update_sk_prot(sk, ctx);
out:
        return rc;
}
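/* ULP descriptor registered with TCP; selected by name via TCP_ULP. */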
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
        .name                   = "tls",
        .uid                    = TCP_ULP_TLS,
        .user_visible           = true,
        .owner                  = THIS_MODULE,
        .init                   = tls_init,
};
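/* Module init/exit: build the IPv4 proto table up front (the IPv6 table is
 * built lazily in tls_init()) and register the "tls" ULP with TCP.
 */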
static int __init tls_register(void)
{
        build_protos(tls_prots[TLSV4], &tcp_prot);

        tcp_register_ulp(&tcp_tls_ulp_ops);

        return 0;
}

static void __exit tls_unregister(void)
{
        tcp_unregister_ulp(&tcp_tls_ulp_ops);
}

module_init(tls_register);
module_exit(tls_unregister);
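/*
 * Minimal user-space sketch (illustrative only, not part of this module) of
 * how the ULP registered above is exercised: the connected socket is switched
 * to the "tls" ULP via TCP_ULP, then the transmit keys are handed to the
 * kernel with setsockopt(SOL_TLS, TLS_TX).  The key material named below is
 * assumed to come from a completed TLS handshake.
 *
 *      struct tls12_crypto_info_aes_gcm_128 crypto_info = {
 *              .info.version = TLS_1_2_VERSION,
 *              .info.cipher_type = TLS_CIPHER_AES_GCM_128,
 *      };
 *
 *      memcpy(crypto_info.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *      memcpy(crypto_info.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *      memcpy(crypto_info.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *      memcpy(crypto_info.rec_seq, rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *
 *      setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls"));
 *      setsockopt(fd, SOL_TLS, TLS_TX, &crypto_info, sizeof(crypto_info));
 */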