Linux 5.7.7
net/xfrm/xfrm_device.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
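
/* Transport mode: the IPsec header was inserted directly after the IP
 * header, so only the transport header pointer (for GSO segments) and
 * the data pointer need to be adjusted before the packet is handed to
 * the driver.
 */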
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
                                  unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb_reset_mac_len(skb);
        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header -= x->props.header_len;

        pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}
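
/* Tunnel mode: an outer IP header of hsize bytes precedes the IPsec
 * header.  Point the transport header behind the outer header for GSO
 * segments and pull the data pointer past the L2 and IPsec headers.
 */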
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
                                    unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;

        skb_reset_mac_len(skb);
        pskb_pull(skb, skb->mac_len + x->props.header_len);
}
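
/* BEET mode: like tunnel mode, but for non-IPv6 selectors the header
 * budget includes a pseudo header of up to IPV4_BEET_PHMAXLEN bytes
 * (adjusted for an IPv6 outer header) that is not part of what gets
 * transmitted, so it is subtracted from the amount pulled.
 */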
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
                                  unsigned int hsize)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        int phlen = 0;

        if (xo->flags & XFRM_GSO_SEGMENT)
                skb->transport_header = skb->network_header + hsize;

        skb_reset_mac_len(skb);
        if (x->sel.family != AF_INET6) {
                phlen = IPV4_BEET_PHMAXLEN;
                if (x->outer_mode.family == AF_INET6)
                        phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
        }

        pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_mode_tunnel_prep(x, skb,
                                                       sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_TRANSPORT:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_transport_prep(x, skb,
                                                     sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_BEET:
                if (x->outer_mode.family == AF_INET)
                        return __xfrm_mode_beet_prep(x, skb,
                                                     sizeof(struct iphdr));
                if (x->outer_mode.family == AF_INET6)
                        return __xfrm_mode_beet_prep(x, skb,
                                                     sizeof(struct ipv6hdr));
                break;
        case XFRM_MODE_ROUTEOPTIMIZATION:
        case XFRM_MODE_IN_TRIGGER:
                break;
        }
}
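
/* Called from the core TX path (validate_xmit_skb()).  Performs the
 * software part of a hardware-offloaded transmit: segments GSO packets
 * that were rerouted away from the offloading device, prepares the
 * header pointers and invokes the ESP offload xmit handler for each
 * segment.  Returns the (possibly segmented) skb list on success, NULL
 * if the packet was consumed or dropped, or the untouched skb when the
 * per-cpu xfrm backlog must be drained first (*again is then set).
 */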
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
        int err;
        unsigned long flags;
        struct xfrm_state *x;
        struct softnet_data *sd;
        struct sk_buff *skb2, *nskb, *pskb = NULL;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo || (xo->flags & XFRM_XMIT))
                return skb;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
                return skb;

        local_irq_save(flags);
        sd = this_cpu_ptr(&softnet_data);
        err = !skb_queue_empty(&sd->xfrm_backlog);
        local_irq_restore(flags);

        if (err) {
                *again = true;
                return skb;
        }

        xo->flags |= XFRM_XMIT;

        if (skb_is_gso(skb)) {
                struct net_device *dev = skb->dev;

                if (unlikely(x->xso.dev != dev)) {
                        struct sk_buff *segs;

                        /* Packet got rerouted, fixup features and segment it. */
                        esp_features = esp_features & ~(NETIF_F_HW_ESP
                                                        | NETIF_F_GSO_ESP);

                        segs = skb_gso_segment(skb, esp_features);
                        if (IS_ERR(segs)) {
                                kfree_skb(skb);
                                atomic_long_inc(&dev->tx_dropped);
                                return NULL;
                        } else {
                                consume_skb(skb);
                                skb = segs;
                        }
                }
        }

        if (!skb->next) {
                esp_features |= skb->dev->gso_partial_features;
                xfrm_outer_mode_prep(x, skb);

                xo->flags |= XFRM_DEV_RESUME;

                err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
                        if (err == -EINPROGRESS)
                                return NULL;

                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return NULL;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));

                return skb;
        }

        skb_list_walk_safe(skb, skb2, nskb) {
                esp_features |= skb->dev->gso_partial_features;
                skb_mark_not_on_list(skb2);

                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_DEV_RESUME;

                xfrm_outer_mode_prep(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (!err) {
                        skb2->next = nskb;
                } else if (err != -EINPROGRESS) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        skb2->next = nskb;
                        kfree_skb_list(skb2);
                        return NULL;
                } else {
                        if (skb == skb2)
                                skb = nskb;
                        else
                                pskb->next = nskb;

                        continue;
                }

                skb_push(skb2, skb2->data - skb_mac_header(skb2));
                pskb = skb2;
        }

        return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
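
/* Bind an xfrm state to an offloading netdevice.  The target device is
 * taken from xuo->ifindex or, failing that, from a route lookup on the
 * state's addresses.  Returns 0 when the state is installed or when the
 * device cannot offload it (software fallback), and a negative errno on
 * real errors such as an ESN state without driver ESN support.
 */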
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo)
{
        int err;
        struct dst_entry *dst;
        struct net_device *dev;
        struct xfrm_state_offload *xso = &x->xso;
        xfrm_address_t *saddr;
        xfrm_address_t *daddr;

        if (!x->type_offload)
                return -EINVAL;

        /* We don't yet support UDP encapsulation and TFC padding. */
        if (x->encap || x->tfcpad)
                return -EINVAL;

        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
                        saddr = &x->props.saddr;
                        daddr = &x->id.daddr;
                } else {
                        saddr = &x->id.daddr;
                        daddr = &x->props.saddr;
                }

                dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
                                        x->props.family,
                                        xfrm_smark_get(0, x));
                if (IS_ERR(dst))
                        return 0;

                dev = dst->dev;

                dev_hold(dev);
                dst_release(dst);
        }

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
                xso->dev = NULL;
                dev_put(dev);
                return 0;
        }

        if (x->props.flags & XFRM_STATE_ESN &&
            !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
                xso->dev = NULL;
                dev_put(dev);
                return -EINVAL;
        }

        xso->dev = dev;
        xso->num_exthdrs = 1;
        xso->flags = xuo->flags;

        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
                xso->num_exthdrs = 0;
                xso->flags = 0;
                xso->dev = NULL;
                dev_put(dev);

                if (err != -EOPNOTSUPP)
                        return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
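
/* Decide on the transmit path whether a packet can take the hardware
 * offload route: the state must have an offload type and no UDP
 * encapsulation, the packet (or its GSO segments) must fit the state
 * MTU, and the driver may veto via xdo_dev_offload_ok().
 */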
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        int mtu;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct net_device *dev = x->xso.dev;

        if (!x->type_offload || x->encap)
                return false;

        if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
            (!xdst->child->xfrm)) {
                mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
                if (skb->len <= mtu)
                        goto ok;

                if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                        goto ok;
        }

        return false;

ok:
        if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
                return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

        return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
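
/* Resume transmission of a packet that was handed back by the driver
 * after an asynchronous offload operation.  If the queue is frozen or
 * stopped, or the transmit does not complete, the packet is parked on
 * the per-cpu xfrm backlog and the TX softirq is raised to retry later.
 */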
void xfrm_dev_resume(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret = NETDEV_TX_BUSY;
        struct netdev_queue *txq;
        struct softnet_data *sd;
        unsigned long flags;

        rcu_read_lock();
        txq = netdev_core_pick_tx(dev, skb, NULL);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
        HARD_TX_UNLOCK(dev, txq);

        if (!dev_xmit_complete(ret)) {
                local_irq_save(flags);
                sd = this_cpu_ptr(&softnet_data);
                skb_queue_tail(&sd->xfrm_backlog, skb);
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);
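
/* Drain the per-cpu backlog from the TX softirq: splice the queued
 * packets onto a private list under the queue lock, then retry each of
 * them via xfrm_dev_resume().
 */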
void xfrm_dev_backlog(struct softnet_data *sd)
{
        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
        struct sk_buff_head list;
        struct sk_buff *skb;

        if (skb_queue_empty(xfrm_backlog))
                return;

        __skb_queue_head_init(&list);

        spin_lock(&xfrm_backlog->lock);
        skb_queue_splice_init(xfrm_backlog, &list);
        spin_unlock(&xfrm_backlog->lock);

        while (!skb_queue_empty(&list)) {
                skb = __skb_dequeue(&list);
                xfrm_dev_resume(skb);
        }
}
#endif
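
/* Sanity-check the offload features a device advertises: ESP TX
 * checksum offload requires full ESP offload, and ESP offload requires
 * the xdo_dev_state_add/delete callbacks to be implemented.  Without
 * CONFIG_XFRM_OFFLOAD no ESP feature flags may be set at all.
 */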
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;

        if ((dev->features & NETIF_F_HW_ESP) &&
            (!(dev->xfrmdev_ops &&
               dev->xfrmdev_ops->xdo_dev_state_add &&
               dev->xfrmdev_ops->xdo_dev_state_delete)))
                return NOTIFY_BAD;
#else
        if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
                return NOTIFY_BAD;
#endif

        return NOTIFY_DONE;
}
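
/* Netdevice notifier glue: re-validate the offload API on register and
 * feature-change events, and flush all offloaded states when a device
 * goes down or is unregistered.
 */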
static int xfrm_dev_register(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
        if (dev->features & NETIF_F_HW_ESP)
                xfrm_dev_state_flush(dev_net(dev), dev, true);

        return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                return xfrm_dev_register(dev);

        case NETDEV_FEAT_CHANGE:
                return xfrm_dev_feat_change(dev);

        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
                return xfrm_dev_down(dev);
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        .notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
        register_netdevice_notifier(&xfrm_dev_notifier);
}