// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "timers.h"
#include "device.h"
#include "peer.h"
#include "socket.h"
#include "messages.h"
#include "cookie.h"

#include <linux/uio.h>
#include <linux/inetdevice.h>
#include <linux/socket.h>
#include <net/ip_tunnels.h>
#include <net/udp.h>
#include <net/sock.h>

static void wg_packet_send_handshake_initiation(struct wg_peer *peer)
{
	struct message_handshake_initiation packet;

	if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
				      REKEY_TIMEOUT))
		return; /* This function is rate limited. */

	atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
	net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n",
			    peer->device->dev->name, peer->internal_id,
			    &peer->endpoint.addr);

	if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) {
		wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
		wg_timers_any_authenticated_packet_traversal(peer);
		wg_timers_any_authenticated_packet_sent(peer);
		atomic64_set(&peer->last_sent_handshake,
			     ktime_get_coarse_boottime_ns());
		wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet),
					      HANDSHAKE_DSCP);
		wg_timers_handshake_initiated(peer);
	}
}

void wg_packet_handshake_send_worker(struct work_struct *work)
{
	struct wg_peer *peer = container_of(work, struct wg_peer,
					    transmit_handshake_work);

	wg_packet_send_handshake_initiation(peer);
	wg_peer_put(peer);
}

void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry)
{
	if (!is_retry)
		peer->timer_handshake_attempts = 0;

	rcu_read_lock_bh();
	/* We check last_sent_handshake here in addition to the actual function
	 * we're queueing up, so that we don't queue things if not strictly
	 * necessary:
	 */
	if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
				      REKEY_TIMEOUT) ||
	    unlikely(READ_ONCE(peer->is_dead)))
		goto out;

	wg_peer_get(peer);
	/* Queues up calling packet_send_queued_handshakes(peer), where we do a
	 * peer_put(peer) after:
	 */
	if (!queue_work(peer->device->handshake_send_wq,
			&peer->transmit_handshake_work))
		/* If the work was already queued, we want to drop the
		 * extra reference:
		 */
		wg_peer_put(peer);

out:
	rcu_read_unlock_bh();
}

void wg_packet_send_handshake_response(struct wg_peer *peer)
{
	struct message_handshake_response packet;

	atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
	net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n",
			    peer->device->dev->name, peer->internal_id,
			    &peer->endpoint.addr);

	if (wg_noise_handshake_create_response(&packet, &peer->handshake)) {
		wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
		if (wg_noise_handshake_begin_session(&peer->handshake,
						     &peer->keypairs)) {
			wg_timers_session_derived(peer);
			wg_timers_any_authenticated_packet_traversal(peer);
			wg_timers_any_authenticated_packet_sent(peer);
			atomic64_set(&peer->last_sent_handshake,
				     ktime_get_coarse_boottime_ns());
			wg_socket_send_buffer_to_peer(peer, &packet,
						      sizeof(packet),
						      HANDSHAKE_DSCP);
		}
	}
}

void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index)
{
	struct message_handshake_cookie packet;

	net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n",
				wg->dev->name, initiating_skb);
	wg_cookie_message_create(&packet, initiating_skb, sender_index,
				 &wg->cookie_checker);
	wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
					      sizeof(packet));
}

static void keep_key_fresh(struct wg_peer *peer)
{
	struct noise_keypair *keypair;
	bool send = false;

	rcu_read_lock_bh();
	keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
	if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
	    (unlikely(atomic64_read(&keypair->sending.counter.counter) >
		      REKEY_AFTER_MESSAGES) ||
	     (keypair->i_am_the_initiator &&
	      unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
						REKEY_AFTER_TIME)))))
		send = true;
	rcu_read_unlock_bh();

	if (send)
		wg_packet_send_queued_handshake_initiation(peer, false);
}

static unsigned int calculate_skb_padding(struct sk_buff *skb)
{
	/* We do this modulo business with the MTU, just in case the networking
	 * layer gives us a packet that's bigger than the MTU. In that case, we
	 * wouldn't want the final subtraction to overflow in the case of the
	 * padded_size being clamped.
	 */
	unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu;
	unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);

	if (padded_size > PACKET_CB(skb)->mtu)
		padded_size = PACKET_CB(skb)->mtu;
	return padded_size - last_unit;
}

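/* Worked example (illustrative, assuming the usual MESSAGE_PADDING_MULTIPLE
 * of 16 and an MTU of 1420): a 1000-byte skb gives last_unit = 1000 and
 * padded_size = ALIGN(1000, 16) = 1008, so 8 bytes of zero padding are added.
 * A 1419-byte skb would align to 1424, be clamped to the 1420-byte MTU, and
 * receive just 1 byte of padding, so a padded packet never exceeds the MTU.
 */
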
static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
	unsigned int padding_len, plaintext_len, trailer_len;
	struct scatterlist sg[MAX_SKB_FRAGS + 8];
	struct message_data *header;
	struct sk_buff *trailer;
	int num_frags;

	/* Calculate lengths. */
	padding_len = calculate_skb_padding(skb);
	trailer_len = padding_len + noise_encrypted_len(0);
	plaintext_len = skb->len + padding_len;

	/* Expand data section to have room for padding and auth tag. */
	num_frags = skb_cow_data(skb, trailer_len, &trailer);
	if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
		return false;

	/* Set the padding to zeros, and make sure it and the auth tag are part
	 * of the skb.
	 */
	memset(skb_tail_pointer(trailer), 0, padding_len);

	/* Expand head section to have room for our header and the network
	 * stack's headers.
	 */
	if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0))
		return false;

	/* Finalize checksum calculation for the inner packet, if required. */
	if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
		return false;

	/* Only after checksumming can we safely add on the padding at the end
	 * and the header.
	 */
	skb_set_inner_network_header(skb, 0);
	header = (struct message_data *)skb_push(skb, sizeof(*header));
	header->header.type = cpu_to_le32(MESSAGE_DATA);
	header->key_idx = keypair->remote_index;
	header->counter = cpu_to_le64(PACKET_CB(skb)->nonce);
	pskb_put(skb, trailer, trailer_len);

	/* Now we can encrypt the scattergather segments */
	sg_init_table(sg, num_frags);
	if (skb_to_sgvec(skb, sg, sizeof(struct message_data),
			 noise_encrypted_len(plaintext_len)) <= 0)
		return false;
	return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0,
						   PACKET_CB(skb)->nonce,
						   keypair->sending.key);
}

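/* Sketch of the on-wire layout produced above, assuming struct message_data
 * is { header.type, key_idx, counter } followed by the ciphertext, and that
 * noise_encrypted_len() accounts for the 16-byte ChaCha20-Poly1305 auth tag:
 *
 *   [ le32 type = MESSAGE_DATA ][ le32 key_idx ][ le64 counter ]
 *   [ ciphertext of (inner packet + zero padding) ][ 16-byte auth tag ]
 */
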
void wg_packet_send_keepalive(struct wg_peer *peer)
{
	struct sk_buff *skb;

	if (skb_queue_empty(&peer->staged_packet_queue)) {
		skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
				GFP_ATOMIC);
		if (unlikely(!skb))
			return;
		skb_reserve(skb, DATA_PACKET_HEAD_ROOM);
		skb->dev = peer->device->dev;
		PACKET_CB(skb)->mtu = skb->dev->mtu;
		skb_queue_tail(&peer->staged_packet_queue, skb);
		net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n",
				    peer->device->dev->name, peer->internal_id,
				    &peer->endpoint.addr);
	}

	wg_packet_send_staged_packets(peer);
}

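/* A keepalive is thus just a data message with an empty plaintext: the skb
 * staged above carries no payload, so once encrypted its length equals
 * message_data_len(0), which is exactly the check used below in
 * wg_packet_create_data_done() to avoid treating it as real data traffic.
 */
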
static void wg_packet_create_data_done(struct sk_buff *first,
				       struct wg_peer *peer)
{
	struct sk_buff *skb, *next;
	bool is_keepalive, data_sent = false;

	wg_timers_any_authenticated_packet_traversal(peer);
	wg_timers_any_authenticated_packet_sent(peer);
	skb_list_walk_safe(first, skb, next) {
		is_keepalive = skb->len == message_data_len(0);
		if (likely(!wg_socket_send_skb_to_peer(peer, skb,
				PACKET_CB(skb)->ds) && !is_keepalive))
			data_sent = true;
	}

	if (likely(data_sent))
		wg_timers_data_sent(peer);

	keep_key_fresh(peer);
}

void wg_packet_tx_worker(struct work_struct *work)
{
	struct crypt_queue *queue = container_of(work, struct crypt_queue,
						 work);
	struct noise_keypair *keypair;
	enum packet_state state;
	struct sk_buff *first;
	struct wg_peer *peer;

	while ((first = __ptr_ring_peek(&queue->ring)) != NULL &&
	       (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
		       PACKET_STATE_UNCRYPTED) {
		__ptr_ring_discard_one(&queue->ring);
		peer = PACKET_PEER(first);
		keypair = PACKET_CB(first)->keypair;

		if (likely(state == PACKET_STATE_CRYPTED))
			wg_packet_create_data_done(first, peer);
		else
			kfree_skb_list(first);

		wg_noise_keypair_put(keypair, false);
		wg_peer_put(peer);
	}
}

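/* Ordering note: the tx worker only ever peeks at the head of the ring and
 * discards an entry once its state has advanced past PACKET_STATE_UNCRYPTED,
 * so packets are transmitted in the order they were queued even though
 * encryption runs in parallel across CPUs. The atomic_read_acquire() above is
 * presumably paired with a release store made when the encrypt side publishes
 * the packet state.
 */
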
void wg_packet_encrypt_worker(struct work_struct *work)
{
	struct crypt_queue *queue = container_of(work, struct multicore_worker,
						 work)->ptr;
	struct sk_buff *first, *skb, *next;

	while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
		enum packet_state state = PACKET_STATE_CRYPTED;

		skb_list_walk_safe(first, skb, next) {
			if (likely(encrypt_packet(skb,
						  PACKET_CB(first)->keypair))) {
				wg_reset_packet(skb);
			} else {
				state = PACKET_STATE_DEAD;
				break;
			}
		}
		wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
					  state);
	}
}

static void wg_packet_create_data(struct sk_buff *first)
{
	struct wg_peer *peer = PACKET_PEER(first);
	struct wg_device *wg = peer->device;
	int ret = -EINVAL;

	rcu_read_lock_bh();
	if (unlikely(READ_ONCE(peer->is_dead)))
		goto err;

	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
						   &peer->tx_queue, first,
						   wg->packet_crypt_wq,
						   &wg->encrypt_queue.last_cpu);
	if (unlikely(ret == -EPIPE))
		wg_queue_enqueue_per_peer(&peer->tx_queue, first,
					  PACKET_STATE_DEAD);
err:
	rcu_read_unlock_bh();
	if (likely(!ret || ret == -EPIPE))
		return;
	wg_noise_keypair_put(PACKET_CB(first)->keypair, false);
	wg_peer_put(peer);
	kfree_skb_list(first);
}

void wg_packet_purge_staged_packets(struct wg_peer *peer)
{
	spin_lock_bh(&peer->staged_packet_queue.lock);
	peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
	__skb_queue_purge(&peer->staged_packet_queue);
	spin_unlock_bh(&peer->staged_packet_queue.lock);
}

void wg_packet_send_staged_packets(struct wg_peer *peer)
{
	struct noise_symmetric_key *key;
	struct noise_keypair *keypair;
	struct sk_buff_head packets;
	struct sk_buff *skb;

	/* Steal the current queue into our local one. */
	__skb_queue_head_init(&packets);
	spin_lock_bh(&peer->staged_packet_queue.lock);
	skb_queue_splice_init(&peer->staged_packet_queue, &packets);
	spin_unlock_bh(&peer->staged_packet_queue.lock);
	if (unlikely(skb_queue_empty(&packets)))
		return;

	/* First we make sure we have a valid reference to a valid key. */
	rcu_read_lock_bh();
	keypair = wg_noise_keypair_get(
		rcu_dereference_bh(peer->keypairs.current_keypair));
	rcu_read_unlock_bh();
	if (unlikely(!keypair))
		goto out_nokey;
	key = &keypair->sending;
	if (unlikely(!READ_ONCE(key->is_valid)))
		goto out_nokey;
	if (unlikely(wg_birthdate_has_expired(key->birthdate,
					      REJECT_AFTER_TIME)))
		goto out_invalid;

	/* After we know we have a somewhat valid key, we now try to assign
	 * nonces to all of the packets in the queue. If we can't assign nonces
	 * for all of them, we just consider it a failure and wait for the next
	 * handshake.
	 */
	skb_queue_walk(&packets, skb) {
		/* 0 for no outer TOS: no leak. TODO: at some later point, we
		 * might consider using flowi->tos as outer instead.
		 */
		PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
		PACKET_CB(skb)->nonce =
			atomic64_inc_return(&key->counter.counter) - 1;
		if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
			goto out_invalid;
	}

	packets.prev->next = NULL;
	wg_peer_get(keypair->entry.peer);
	PACKET_CB(packets.next)->keypair = keypair;
	wg_packet_create_data(packets.next);
	return;

out_invalid:
	WRITE_ONCE(key->is_valid, false);
out_nokey:
	wg_noise_keypair_put(keypair, false);

	/* We orphan the packets if we're waiting on a handshake, so that they
	 * don't block a socket's pool.
	 */
	skb_queue_walk(&packets, skb)
		skb_orphan(skb);
	/* Then we put them back on the top of the queue. We're not too
	 * concerned about accidentally getting things a little out of order if
	 * packets are being added really fast, because this queue is for before
	 * packets can even be sent and it's small anyway.
	 */
	spin_lock_bh(&peer->staged_packet_queue.lock);
	skb_queue_splice(&packets, &peer->staged_packet_queue);
	spin_unlock_bh(&peer->staged_packet_queue.lock);

	/* If we're exiting because there's something wrong with the key, it
	 * means we should initiate a new handshake.
	 */
	wg_packet_send_queued_handshake_initiation(peer, false);
}
