/* Copyright (c) 2018, Mellanox Technologies All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tls.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <net/ip6_checksum.h>
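
/* Link the remainder of the current scatter_walk page into @sg so the AEAD
 * can read from (or write to) the walk position directly, and chain the rest
 * of the source scatterlist behind it.
 */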
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
	struct scatterlist *src = walk->sg;
	int diff = walk->offset - src->offset;

	sg_set_page(sg, sg_page(src),
		    src->length - diff, walk->offset);

	scatterwalk_crypto_chain(sg, sg_next(src), 2);
}
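
/* Encrypt a single TLS record starting at the current walk position.
 * The record is assumed to be TLS 1.2 AES-GCM-128:
 *
 *   byte 0      record type
 *   bytes 1-2   protocol version
 *   bytes 3-4   record length (big endian)
 *   bytes 5-12  explicit nonce (per-record IV)
 *   ...         payload followed by a 16-byte authentication tag
 *
 * The header and explicit nonce are copied from input to output unmodified,
 * the AAD and full IV are rebuilt from them, and the payload is encrypted
 * between the two walks. *in_len is updated with the bytes consumed.
 */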
static int tls_enc_record(struct aead_request *aead_req,
			  struct crypto_aead *aead, char *aad,
			  char *iv, __be64 rcd_sn,
			  struct scatter_walk *in,
			  struct scatter_walk *out, int *in_len)
{
	unsigned char buf[TLS_HEADER_SIZE + TLS_CIPHER_AES_GCM_128_IV_SIZE];
	struct scatterlist sg_in[3];
	struct scatterlist sg_out[3];
	u16 len;
	int rc;

	len = min_t(int, *in_len, ARRAY_SIZE(buf));

	scatterwalk_copychunks(buf, in, len, 0);
	scatterwalk_copychunks(buf, out, len, 1);

	*in_len -= len;
	if (!*in_len)
		return 0;

	scatterwalk_pagedone(in, 0, 1);
	scatterwalk_pagedone(out, 1, 1);

	len = buf[4] | (buf[3] << 8);
	len -= TLS_CIPHER_AES_GCM_128_IV_SIZE;

	tls_make_aad(aad, len - TLS_CIPHER_AES_GCM_128_TAG_SIZE,
		     (char *)&rcd_sn, sizeof(rcd_sn), buf[0],
		     TLS_1_2_VERSION); /* assumption: fallback handles TLS 1.2 only */

	memcpy(iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, buf + TLS_HEADER_SIZE,
	       TLS_CIPHER_AES_GCM_128_IV_SIZE);

	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
	sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE);
	sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE);
	chain_to_walk(sg_in + 1, in);
	chain_to_walk(sg_out + 1, out);

	*in_len -= len;
	if (*in_len < 0) {
		*in_len += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		/* The input buffer doesn't contain the entire record.
		 * Trim len accordingly. The resulting authentication tag
		 * will contain garbage, but we don't care, so we won't
		 * include any of it in the output skb.
		 * Note that we assume the output buffer length
		 * is larger than input buffer length + tag size.
		 */
		if (*in_len < 0)
			*in_len = 0;
		len += *in_len;
		*in_len = 0;
	}

	if (*in_len) {
		scatterwalk_copychunks(NULL, in, len, 2);
		scatterwalk_pagedone(in, 0, 1);
		scatterwalk_copychunks(NULL, out, len, 2);
		scatterwalk_pagedone(out, 1, 1);
	}

	len -= TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);

	rc = crypto_aead_encrypt(aead_req);

	return rc;
}

static void tls_init_aead_request(struct aead_request *aead_req,
				  struct crypto_aead *aead)
{
	aead_request_set_tfm(aead_req, aead);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
}
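
/* Allocate an aead_request large enough for the transform's request context
 * and initialize it for @aead with the TLS AAD size.
 */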
static struct aead_request *tls_alloc_aead_request(struct crypto_aead *aead,
						   gfp_t flags)
{
	unsigned int req_size = sizeof(struct aead_request) +
		crypto_aead_reqsize(aead);
	struct aead_request *aead_req;

	aead_req = kzalloc(req_size, flags);
	if (aead_req)
		tls_init_aead_request(aead_req, aead);
	return aead_req;
}
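
/* Walk the input and output scatterlists and encrypt them record by record,
 * bumping the record sequence number after each record, until the input is
 * exhausted or an encryption error occurs.
 */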
static int tls_enc_records(struct aead_request *aead_req,
			   struct crypto_aead *aead, struct scatterlist *sg_in,
			   struct scatterlist *sg_out, char *aad, char *iv,
			   u64 rcd_sn, int len)
{
	struct scatter_walk out, in;
	int rc;

	scatterwalk_start(&in, sg_in);
	scatterwalk_start(&out, sg_out);

	do {
		rc = tls_enc_record(aead_req, aead, aad, iv,
				    cpu_to_be64(rcd_sn), &in, &out, &len);
		rcd_sn++;

	} while (rc == 0 && len);

	scatterwalk_done(&in, 0, 0);
	scatterwalk_done(&out, 1, 0);

	return rc;
}

/* Can't use icsk->icsk_af_ops->send_check here because the ip addresses
 * might have been changed by NAT.
 */
static void update_chksum(struct sk_buff *skb, int headln)
{
	struct tcphdr *th = tcp_hdr(skb);
	int datalen = skb->len - headln;
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;

	/* We only changed the payload so if we are using partial we don't
	 * need to update anything.
	 */
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
		return;

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);

	if (skb->sk->sk_family == AF_INET6) {
		ipv6h = ipv6_hdr(skb);
		th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					     datalen, IPPROTO_TCP, 0);
	} else {
		iph = ip_hdr(skb);
		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen,
					       IPPROTO_TCP, 0);
	}
}
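
/* Copy the headers of the original skb into the freshly encrypted copy,
 * recompute the TCP checksum, and transfer the destructor and write-memory
 * accounting from the original socket buffer to the new one.
 */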
static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
	struct sock *sk = skb->sk;
	int delta;

	skb_copy_header(nskb, skb);

	skb_put(nskb, skb->len);
	memcpy(nskb->data, skb->data, headln);

	nskb->destructor = skb->destructor;
	nskb->sk = sk;
	skb->destructor = NULL;
	skb->sk = NULL;

	update_chksum(nskb, headln);

	delta = nskb->truesize - skb->truesize;
	if (likely(delta < 0))
		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	else if (delta)
		refcount_add(delta, &sk->sk_wmem_alloc);
}

/* This function may be called after the user socket is already
 * closed so make sure we don't use anything freed during
 * tls_sk_proto_close here
 */
static int fill_sg_in(struct scatterlist *sg_in,
		      struct sk_buff *skb,
		      struct tls_offload_context_tx *ctx,
		      u64 *rcd_sn,
		      s32 *sync_size,
		      int *resync_sgs)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int payload_len = skb->len - tcp_payload_offset;
	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
	struct tls_record_info *record;
	unsigned long flags;
	int remaining;
	int i;

	spin_lock_irqsave(&ctx->lock, flags);
	record = tls_get_record(ctx, tcp_seq, rcd_sn);
	if (!record) {
		spin_unlock_irqrestore(&ctx->lock, flags);
		WARN(1, "Record not found for seq %u\n", tcp_seq);
		return -EINVAL;
	}

	*sync_size = tcp_seq - tls_record_start_seq(record);
	if (*sync_size < 0) {
		int is_start_marker = tls_record_is_start_marker(record);

		spin_unlock_irqrestore(&ctx->lock, flags);
		/* This should only occur if the relevant record was
		 * already acked. In that case it should be ok
		 * to drop the packet and avoid retransmission.
		 *
		 * There is a corner case where the packet contains
		 * both an acked and a non-acked record.
		 * We currently don't handle that case and rely
		 * on TCP to retransmit a packet that doesn't contain
		 * already acked payload.
		 */
		if (!is_start_marker)
			*sync_size = 0;
		return -EINVAL;
	}

	remaining = *sync_size;
	for (i = 0; remaining > 0; i++) {
		skb_frag_t *frag = &record->frags[i];

		__skb_frag_ref(frag);
		sg_set_page(sg_in + i, skb_frag_page(frag),
			    skb_frag_size(frag), frag->page_offset);

		remaining -= skb_frag_size(frag);

		if (remaining < 0)
			sg_in[i].length += remaining;
	}
	*resync_sgs = i;

	spin_unlock_irqrestore(&ctx->lock, flags);
	if (skb_to_sgvec(skb, &sg_in[i], tcp_payload_offset, payload_len) < 0)
		return -EINVAL;

	return 0;
}
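
/* Build the three-entry output scatterlist: a scratch area standing in for
 * the already-transmitted part of the record (sync_size bytes), the payload
 * of the new skb, and room for the authentication tag.
 */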
static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
			struct tls_context *tls_ctx,
			struct sk_buff *nskb,
			int tcp_payload_offset,
			int payload_len, int sync_size,
			void *dummy_buf)
{
	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
	/* Add room for authentication tag produced by crypto */
	dummy_buf += sync_size;
	sg_set_buf(&sg_out[2], dummy_buf, TLS_CIPHER_AES_GCM_128_TAG_SIZE);
}
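
/* Allocate the scratch buffer and the output skb, then re-encrypt everything
 * from the start of the record up to the end of this skb's payload. The
 * scratch buffer is laid out as:
 *
 *   salt | explicit IV | AAD | sync_size bytes of throw-away output | tag
 *
 * Only the part that belongs to the new skb is kept; the rest exists so the
 * AEAD has somewhere to write the bytes we do not retransmit.
 */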
static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
				   struct scatterlist sg_out[3],
				   struct scatterlist *sg_in,
				   struct sk_buff *skb,
				   s32 sync_size, u64 rcd_sn)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	void *buf, *iv, *aad, *dummy_buf;
	struct aead_request *aead_req;
	struct sk_buff *nskb = NULL;
	int buf_len;

	aead_req = tls_alloc_aead_request(ctx->aead_send, GFP_ATOMIC);
	if (!aead_req)
		return NULL;

	buf_len = TLS_CIPHER_AES_GCM_128_SALT_SIZE +
		  TLS_CIPHER_AES_GCM_128_IV_SIZE +
		  TLS_AAD_SPACE_SIZE +
		  sync_size +
		  TLS_CIPHER_AES_GCM_128_TAG_SIZE;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf)
		goto free_req;

	iv = buf;
	memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
	       TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
	      TLS_CIPHER_AES_GCM_128_IV_SIZE;
	dummy_buf = aad + TLS_AAD_SPACE_SIZE;

	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
	if (!nskb)
		goto free_buf;

	skb_reserve(nskb, skb_headroom(skb));

	fill_sg_out(sg_out, buf, tls_ctx, nskb, tcp_payload_offset,
		    payload_len, sync_size, dummy_buf);

	if (tls_enc_records(aead_req, ctx->aead_send, sg_in, sg_out, aad, iv,
			    rcd_sn, sync_size + payload_len) < 0)
		goto free_nskb;

	complete_skb(nskb, skb, tcp_payload_offset);

	/* validate_xmit_skb_list assumes that if the skb wasn't segmented
	 * nskb->prev will point to the skb itself
	 */
	nskb->prev = nskb;

free_buf:
	kfree(buf);
free_req:
	kfree(aead_req);
	return nskb;
free_nskb:
	kfree_skb(nskb);
	nskb = NULL;
	goto free_buf;
}
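
/* Software fallback for the TX offload path: the skb (typically a
 * retransmission or an out-of-order segment the device cannot encrypt
 * in-line) is encrypted in software from the start of its TLS record and a
 * new, encrypted skb is returned in its place.
 */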
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	struct scatterlist *sg_in, sg_out[3];
	struct sk_buff *nskb = NULL;
	int sg_in_max_elements;
	int resync_sgs = 0;
	s32 sync_size = 0;
	u64 rcd_sn;

	/* worst case is:
	 * MAX_SKB_FRAGS in tls_record_info
	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
	 */
	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;

	if (!payload_len)
		return skb;

	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
	if (!sg_in)
		goto free_orig;

	sg_init_table(sg_in, sg_in_max_elements);
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));

	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
		/* bypass packets before kernel TLS socket option was set */
		if (sync_size < 0 && payload_len <= -sync_size)
			nskb = skb_get(skb);
		goto put_sg;
	}

	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);

put_sg:
	while (resync_sgs)
		put_page(sg_page(&sg_in[--resync_sgs]));
	kfree(sg_in);
free_orig:
	kfree_skb(skb);
	return nskb;
}

struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
				      struct net_device *dev,
				      struct sk_buff *skb)
{
	if (dev == tls_get_ctx(sk)->netdev)
		return skb;

	return tls_sw_fallback(sk, skb);
}
EXPORT_SYMBOL_GPL(tls_validate_xmit_skb);
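
/* Set up the software AES-GCM transform used by the fallback path, keyed
 * with the same material that the device offload was configured with.
 */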
int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info)
{
	const u8 *key;
	int rc;

	offload_ctx->aead_send =
	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(offload_ctx->aead_send)) {
		rc = PTR_ERR(offload_ctx->aead_send);
		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
		offload_ctx->aead_send = NULL;
		goto err_out;
	}

	key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;

	rc = crypto_aead_setkey(offload_ctx->aead_send, key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(offload_ctx->aead_send,
				     TLS_CIPHER_AES_GCM_128_TAG_SIZE);
	if (rc)
		goto free_aead;

	return 0;
free_aead:
	crypto_free_aead(offload_ctx->aead_send);
err_out:
	return rc;
}