/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
                         bool multicore, unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool multicore);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
                                                bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
                                     struct sk_buff *initiating_skb,
                                     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

enum packet_state {
        PACKET_STATE_UNCRYPTED,
        PACKET_STATE_CRYPTED,
        PACKET_STATE_DEAD
};
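
/* A packet enters the queues as PACKET_STATE_UNCRYPTED; a crypto worker
 * then marks it CRYPTED on success or DEAD on failure, and the per-peer
 * consumer waits on the atomic state in struct packet_cb below before
 * touching the skb (see wg_queue_enqueue_per_device_and_peer()).
 */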

struct packet_cb {
        u64 nonce;
        struct noise_keypair *keypair;
        atomic_t state;
        u32 mtu;
        u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
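
/* PACKET_CB() overlays struct packet_cb onto the skb->cb scratch area,
 * which is 48 bytes, so the struct must never outgrow it. A minimal sketch
 * of a compile-time guard, assuming it were placed in some init function
 * (no such check appears in this header):
 *
 *      BUILD_BUG_ON(sizeof(struct packet_cb) > sizeof_field(struct sk_buff, cb));
 */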

/* Returns either the correct skb->protocol value, or 0 if invalid. */
static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb)
{
        if (skb_network_header(skb) >= skb->head &&
            (skb_network_header(skb) + sizeof(struct iphdr)) <=
                    skb_tail_pointer(skb) &&
            ip_hdr(skb)->version == 4)
                return htons(ETH_P_IP);
        if (skb_network_header(skb) >= skb->head &&
            (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
                    skb_tail_pointer(skb) &&
            ipv6_hdr(skb)->version == 6)
                return htons(ETH_P_IPV6);
        return 0;
}
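
/* A minimal usage sketch (illustrative; the real call sites live elsewhere
 * in the driver): validate the header of a freshly decapsulated packet
 * before trusting its protocol field:
 *
 *      __be16 proto = wg_skb_examine_untrusted_ip_hdr(skb);
 *      if (unlikely(!proto))
 *              goto packet_is_invalid;
 *      skb->protocol = proto;
 */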

static inline void wg_reset_packet(struct sk_buff *skb)
{
        skb_scrub_packet(skb, true);
        memset(&skb->headers_start, 0,
               offsetof(struct sk_buff, headers_end) -
                       offsetof(struct sk_buff, headers_start));
        skb->queue_mapping = 0;
        skb->nohdr = 0;
        skb->peeked = 0;
        skb->mac_len = 0;
        skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
        skb_reset_tc(skb);
#endif
        skb->hdr_len = skb_headroom(skb);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb_probe_transport_header(skb);
        skb_reset_inner_headers(skb);
}
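
/* Roughly what the reset above accomplishes: a packet that just crossed the
 * crypto boundary should look freshly received, so skb_scrub_packet(skb,
 * true) drops routing, socket, and conntrack state as if the skb had
 * crossed a namespace, the memset clears every field between headers_start
 * and headers_end, and the header offsets are recomputed for the decrypted
 * contents.
 */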

static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
        unsigned int cpu = *stored_cpu, cpu_index, i;

        if (unlikely(cpu == nr_cpumask_bits ||
                     !cpumask_test_cpu(cpu, cpu_online_mask))) {
                cpu_index = id % cpumask_weight(cpu_online_mask);
                cpu = cpumask_first(cpu_online_mask);
                for (i = 0; i < cpu_index; ++i)
                        cpu = cpumask_next(cpu, cpu_online_mask);
                *stored_cpu = cpu;
        }
        return cpu;
}
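
/* This helper is what keeps per-peer work serialized: a CPU is derived
 * deterministically from the peer's internal_id and cached in stored_cpu,
 * so successive items for one peer land on the same online CPU.
 * wg_queue_enqueue_per_peer() below uses it exactly this way:
 *
 *      queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
 *                                             peer->internal_id),
 *                    peer->device->packet_crypt_wq, &queue->work);
 */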

/* This function is racy, in the sense that next is unlocked, so it could return
 * the same CPU twice. A race-free version of this would be to instead store an
 * atomic sequence number, do an increment-and-return, and then iterate through
 * every possible CPU until we get to that index -- choose_cpu. However that's
 * a bit slower, and it doesn't seem like this potential race actually
 * introduces any performance loss, so we live with it.
 */
static inline int wg_cpumask_next_online(int *next)
{
        int cpu = *next;

        while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
                cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
        *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
        return cpu;
}
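
/* A minimal round-robin sketch with a hypothetical caller-owned cursor
 * (example_next_cpu and some_wq/some_work are illustrative, not driver
 * names):
 *
 *      static int example_next_cpu;
 *
 *      int cpu = wg_cpumask_next_online(&example_next_cpu);
 *      queue_work_on(cpu, some_wq, some_work);
 *
 * Each call returns the cursor's CPU (first skipping it forward if it went
 * offline), then stores the following CPU for next time; the modulo on
 * nr_cpumask_bits wraps the scan back to CPU 0.
 */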

static inline int wg_queue_enqueue_per_device_and_peer(
        struct crypt_queue *device_queue, struct crypt_queue *peer_queue,
        struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
        int cpu;

        atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
        /* We first queue this up for peer ingestion, but the consumer
         * will wait for the state to change to CRYPTED or DEAD before
         * processing it.
         */
        if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb)))
                return -ENOSPC;
        /* Then we queue it up in the device queue, which consumes the
         * packet as soon as it can.
         */
        cpu = wg_cpumask_next_online(next_cpu);
        if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
                return -EPIPE;
        queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
        return 0;
}
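
/* The two-ring scheme above is what buys parallel crypto with in-order
 * delivery: the device ring is drained in parallel by round-robin-chosen
 * CPUs, while the per-peer ring preserves packet order; its consumer waits
 * on PACKET_CB(skb)->state as noted at enum packet_state.
 */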

static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue,
                                             struct sk_buff *skb,
                                             enum packet_state state)
{
        /* We take a reference, because as soon as we call atomic_set, the
         * peer can be freed from below us.
         */
        struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

        atomic_set_release(&PACKET_CB(skb)->state, state);
        queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu,
                                               peer->internal_id),
                      peer->device->packet_crypt_wq, &queue->work);
        wg_peer_put(peer);
}
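
/* An illustrative call site, a sketch only: the peer's tx_queue field is
 * assumed from peer.h, and "ok" stands for the result of encryption. A
 * worker that finished crypto on skb would publish the outcome and kick the
 * peer's serialized consumer like so:
 *
 *      wg_queue_enqueue_per_peer(&PACKET_PEER(skb)->tx_queue, skb,
 *                                ok ? PACKET_STATE_CRYPTED
 *                                   : PACKET_STATE_DEAD);
 */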

static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb,
                                                  enum packet_state state)
{
        /* We take a reference, because as soon as we call atomic_set, the
         * peer can be freed from below us.
         */
        struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

        atomic_set_release(&PACKET_CB(skb)->state, state);
        napi_schedule(&peer->napi);
        wg_peer_put(peer);
}
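
/* Same publication pattern as wg_queue_enqueue_per_peer() above, except the
 * consumer is the peer's NAPI context rather than a workqueue, so received
 * packets are delivered up the stack from wg_packet_rx_poll(), declared
 * earlier in this header.
 */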

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */