/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
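
/* Illustrative only: a rough sketch of how wg_packet_queue_init() and
 * wg_packet_queue_free() might be paired by a caller. The worker function and
 * ring size used here are placeholders for the example, not values defined by
 * this header:
 *
 *	struct crypt_queue queue;
 *
 *	if (wg_packet_queue_init(&queue, wg_packet_encrypt_worker, 1024))
 *		return -ENOMEM;
 *	...
 *	wg_packet_queue_free(&queue, true);
 */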

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
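
/* PACKET_CB() overlays struct packet_cb onto the skb->cb scratch area, so the
 * per-packet crypto state travels with the skb itself; PACKET_PEER() is only
 * meaningful once PACKET_CB(skb)->keypair has been assigned. Illustrative use
 * (not a call site defined by this header):
 *
 *	if (atomic_read_acquire(&PACKET_CB(skb)->state) == PACKET_STATE_DEAD)
 *		kfree_skb(skb);
 */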

static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);
	return real_protocol && skb->protocol == real_protocol;
}

static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;
	skb_scrub_packet(skb, true);
	memset(&skb->headers, 0, sizeof(skb->headers));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}

static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu >= nr_cpu_ids ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}
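
/* A worked example of the mapping above (illustrative numbers only): with
 * CPUs 0-3 online and id == 6, cpu_index is 6 % 4 == 2, so the loop walks the
 * online mask to CPU 2 and caches it in *stored_cpu until that CPU goes
 * offline. All work keyed by the same id therefore lands on one CPU and stays
 * serialized there.
 */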

/* This function is racy, in the sense that it's called while last_cpu is
 * unlocked, so it could return the same CPU twice. Adding locking or using
 * atomic sequence numbers is slower though, and the consequences of racing are
 * harmless, so live with it.
 */
static inline int wg_cpumask_next_online(int *last_cpu)
{
	int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);
	WRITE_ONCE(*last_cpu, cpu);
	return cpu;
}

void wg_prev_queue_init(struct prev_queue *queue);

/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
	if (queue->peeked)
		return queue->peeked;
	queue->peeked = wg_prev_queue_dequeue(queue);
	return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
	queue->peeked = NULL;
}
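
/* Illustrative only: a sketch of how the single consumer is expected to pair
 * peek with drop_peeked. The peer->rx_queue field name and the state handling
 * shown are assumptions for the example, not definitions from this header:
 *
 *	while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL &&
 *	       (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
 *			PACKET_STATE_UNCRYPTED) {
 *		wg_prev_queue_drop_peeked(&peer->rx_queue);
 *		... handle skb according to state (CRYPTED vs DEAD) ...
 *	}
 */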

static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before.
	 */
	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
		return -ENOSPC;

	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(&device_queue->last_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}
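
/* The split above is what provides both parallelism and ordering: the device
 * queue fans packets out across online CPUs for the expensive crypto work,
 * while the per-peer prev_queue preserves the original per-peer packet order
 * that the serialized tx/rx completion helpers below depend on.
 */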

static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
	wg_peer_put(peer);
}

static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}
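
/* Note the asymmetry between the two completion paths above: tx hands the
 * finished packet to the peer's transmit_packet_work on the device's crypt
 * workqueue, while rx only schedules NAPI and lets wg_packet_rx_poll() drain
 * the peer queue.
 */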

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */