/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>

#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/highmem.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>

#include <net/tls.h>

MODULE_AUTHOR("Mellanox Technologies");
MODULE_DESCRIPTION("Transport Layer Security Support");
MODULE_LICENSE("Dual BSD/GPL");
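
/*
 * A minimal userspace sketch (not part of this module) of the API
 * implemented below, mirroring Documentation/networking/tls.txt: the
 * socket is first switched to the "tls" ULP via TCP_ULP, then TX
 * crypto parameters are installed with setsockopt(SOL_TLS, TLS_TX).
 * enable_ktls_tx() and its key-material arguments are illustrative
 * names only; SOL_TLS is 282 if the installed headers lack it, and
 * error handling is elided:
 *
 *	#include <linux/tls.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/socket.h>
 *	#include <string.h>
 *
 *	static int enable_ktls_tx(int fd, const unsigned char *key,
 *				  const unsigned char *iv,
 *				  const unsigned char *salt,
 *				  const unsigned char *rec_seq)
 *	{
 *		struct tls12_crypto_info_aes_gcm_128 ci;
 *
 *		if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
 *			return -1;
 *
 *		memset(&ci, 0, sizeof(ci));
 *		ci.info.version = TLS_1_2_VERSION;
 *		ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
 *		memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
 *		memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
 *		memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
 *		memcpy(ci.rec_seq, rec_seq,
 *		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
 *
 *		return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
 *	}
 */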

enum {
	TLS_BASE_TX,
	TLS_SW_TX,
	TLS_NUM_CONFIG,
};

static struct proto tls_prots[TLS_NUM_CONFIG];

static inline void update_sk_prot(struct sock *sk, struct tls_context *ctx)
{
	sk->sk_prot = &tls_prots[ctx->tx_conf];
}

int wait_on_pending_writer(struct sock *sk, long *timeo)
{
	int rc = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		if (!*timeo) {
			rc = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			rc = sock_intr_errno(*timeo);
			break;
		}

		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
			break;
	}

	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}
int tls_push_sg(struct sock *sk,
		struct tls_context *ctx,
		struct scatterlist *sg,
		u16 first_offset,
		int flags)
{
	int sendpage_flags = flags | MSG_SENDPAGE_NOTLAST;
	int ret = 0;
	struct page *p;
	size_t size;
	int offset = first_offset;

	size = sg->length - offset;
	offset += sg->offset;

	while (1) {
		if (sg_is_last(sg))
			sendpage_flags = flags;

		/* is sending application-limited? */
		tcp_rate_check_app_limited(sk);
		p = sg_page(sg);
retry:
		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);

		if (ret != size) {
			if (ret > 0) {
				offset += ret;
				size -= ret;
				goto retry;
			}

			/* save the send state so the push can resume */
			offset -= sg->offset;
			ctx->partially_sent_offset = offset;
			ctx->partially_sent_record = (void *)sg;

			return ret;
		}

		put_page(p);
		sk_mem_uncharge(sk, sg->length);
		sg = sg_next(sg);
		if (!sg)
			break;

		offset = sg->offset;
		size = sg->length;
	}

	clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);

	return 0;
}

static int tls_handle_open_record(struct sock *sk, int flags)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (tls_is_pending_open_record(ctx))
		return ctx->push_pending_record(sk, flags);

	return 0;
}
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
		      unsigned char *record_type)
{
	struct cmsghdr *cmsg;
	int rc = -EINVAL;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
		if (cmsg->cmsg_level != SOL_TLS)
			continue;

		switch (cmsg->cmsg_type) {
		case TLS_SET_RECORD_TYPE:
			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*record_type)))
				return -EINVAL;

			/* record types may only change on record boundaries */
			if (msg->msg_flags & MSG_MORE)
				return -EINVAL;

			rc = tls_handle_open_record(sk, msg->msg_flags);
			if (rc)
				return rc;

			*record_type = *(unsigned char *)CMSG_DATA(cmsg);
			rc = 0;
			break;
		default:
			return -EINVAL;
		}
	}

	return rc;
}

int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
				   int flags, long *timeo)
{
	struct scatterlist *sg;
	u16 offset;

	if (!tls_is_partially_sent_record(ctx))
		return ctx->push_pending_record(sk, flags);

	sg = ctx->partially_sent_record;
	offset = ctx->partially_sent_offset;

	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}
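
/* Note: sk_write_space callbacks can fire from softirq context (for
 * TCP, via tcp_check_space() on incoming ACKs), which is presumably
 * why tls_write_space() below pushes the pending record under
 * GFP_ATOMIC rather than the socket's usual allocation mode.
 */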
static void tls_write_space(struct sock *sk)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
		gfp_t sk_allocation = sk->sk_allocation;
		int rc;
		long timeo = 0;

		sk->sk_allocation = GFP_ATOMIC;
		rc = tls_push_pending_closed_record(sk, ctx,
						    MSG_DONTWAIT |
						    MSG_NOSIGNAL,
						    &timeo);
		sk->sk_allocation = sk_allocation;

		if (rc < 0)
			return;
	}

	ctx->sk_write_space(sk);
}

static void tls_sk_proto_close(struct sock *sk, long timeout)
{
	struct tls_context *ctx = tls_get_ctx(sk);
	long timeo = sock_sndtimeo(sk, 0);
	void (*sk_proto_close)(struct sock *sk, long timeout);

	lock_sock(sk);

	sk_proto_close = ctx->sk_proto_close;

	if (ctx->tx_conf == TLS_BASE_TX) {
		kfree(ctx);
		goto skip_tx_cleanup;
	}

	if (!tls_complete_pending_work(sk, ctx, 0, &timeo))
		tls_handle_open_record(sk, 0);

	/* release the pages of a partially sent record, if any */
	if (ctx->partially_sent_record) {
		struct scatterlist *sg = ctx->partially_sent_record;

		while (1) {
			put_page(sg_page(sg));
			sk_mem_uncharge(sk, sg->length);

			if (sg_is_last(sg))
				break;
			sg++;
		}
	}

	kfree(ctx->rec_seq);
	kfree(ctx->iv);

	if (ctx->tx_conf == TLS_SW_TX)
		tls_sw_free_tx_resources(sk);

skip_tx_cleanup:
	release_sock(sk);
	sk_proto_close(sk, timeout);
}

static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
				int __user *optlen)
{
	int rc = 0;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	int len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (!optval || (len < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	if (!ctx) {
		rc = -EBUSY;
		goto out;
	}

	/* get user crypto info */
	crypto_info = &ctx->crypto_send;

	if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
		rc = -EBUSY;
		goto out;
	}

	if (len == sizeof(*crypto_info)) {
		if (copy_to_user(optval, crypto_info, sizeof(*crypto_info)))
			rc = -EFAULT;
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		lock_sock(sk);
		memcpy(crypto_info_aes_gcm_128->iv, ctx->iv,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		release_sock(sk);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
}

static int do_tls_getsockopt(struct sock *sk, int optname,
			     char __user *optval, int __user *optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		rc = do_tls_getsockopt_tx(sk, optval, optlen);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->getsockopt(sk, level, optname, optval, optlen);

	return do_tls_getsockopt(sk, optname, optval, optlen);
}

static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
				unsigned int optlen)
{
	struct tls_crypto_info *crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	int rc = 0;
	int tx_conf;

	if (!optval || (optlen < sizeof(*crypto_info))) {
		rc = -EINVAL;
		goto out;
	}

	crypto_info = &ctx->crypto_send;
	/* Currently we don't support setting crypto info more than once */
	if (TLS_CRYPTO_INFO_READY(crypto_info))
		goto out;

	rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* check version */
	if (crypto_info->version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto err_crypto_info;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		if (optlen != sizeof(struct tls12_crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		rc = copy_from_user(crypto_info + 1,
				    optval + sizeof(*crypto_info),
				    optlen - sizeof(*crypto_info));
		if (rc) {
			rc = -EFAULT;
			goto err_crypto_info;
		}
		break;
	}
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	/* currently SW is default, we will have ethtool in future */
	rc = tls_set_sw_offload(sk, ctx);
	tx_conf = TLS_SW_TX;
	if (rc)
		goto err_crypto_info;

	ctx->tx_conf = tx_conf;
	update_sk_prot(sk, ctx);
	ctx->sk_write_space = sk->sk_write_space;
	sk->sk_write_space = tls_write_space;
	goto out;

err_crypto_info:
	memset(crypto_info, 0, sizeof(*crypto_info));
out:
	return rc;
}

static int do_tls_setsockopt(struct sock *sk, int optname,
			     char __user *optval, unsigned int optlen)
{
	int rc = 0;

	switch (optname) {
	case TLS_TX:
		lock_sock(sk);
		rc = do_tls_setsockopt_tx(sk, optval, optlen);
		release_sock(sk);
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	return rc;
}

static int tls_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->setsockopt(sk, level, optname, optval, optlen);

	return do_tls_setsockopt(sk, optname, optval, optlen);
}
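
/* tls_init() is the tcp_ulp_ops init hook, invoked when userspace
 * attaches the ULP with setsockopt(SOL_TCP, TCP_ULP, "tls"). The
 * original proto callbacks are saved in the context so levels other
 * than SOL_TLS and the close path still chain through to TCP.
 */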
static int tls_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tls_context *ctx;
	int rc = 0;

	/* allocate tls context */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out;
	}
	icsk->icsk_ulp_data = ctx;
	ctx->setsockopt = sk->sk_prot->setsockopt;
	ctx->getsockopt = sk->sk_prot->getsockopt;
	ctx->sk_proto_close = sk->sk_prot->close;

	ctx->tx_conf = TLS_BASE_TX;
	update_sk_prot(sk, ctx);
out:
	return rc;
}

static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
	.name			= "tls",
	.owner			= THIS_MODULE,
	.init			= tls_init,
};
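
/* Build one proto instance per TX configuration: TLS_BASE_TX only
 * overrides the option handling and close paths, while TLS_SW_TX
 * additionally routes sendmsg()/sendpage() through the software
 * encryption implementation in tls_sw.c.
 */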
static void build_protos(struct proto *prot, struct proto *base)
{
	prot[TLS_BASE_TX] = *base;
	prot[TLS_BASE_TX].setsockopt	= tls_setsockopt;
	prot[TLS_BASE_TX].getsockopt	= tls_getsockopt;
	prot[TLS_BASE_TX].close		= tls_sk_proto_close;

	prot[TLS_SW_TX] = prot[TLS_BASE_TX];
	prot[TLS_SW_TX].sendmsg		= tls_sw_sendmsg;
	prot[TLS_SW_TX].sendpage	= tls_sw_sendpage;
}

static int __init tls_register(void)
{
	build_protos(tls_prots, &tcp_prot);

	tcp_register_ulp(&tcp_tls_ulp_ops);

	return 0;
}

static void __exit tls_unregister(void)
{
	tcp_unregister_ulp(&tcp_tls_ulp_ops);
}

module_init(tls_register);
module_exit(tls_unregister);