// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
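
/*
 * The __xfrm_*_prep() helpers below adjust the skb header pointers for
 * transport, tunnel and BEET mode when the IPsec transformation is
 * performed at layer 2: the outer headers are pulled so that the
 * offload transmit code sees the packet the same way for each mode.
 */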
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}
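
/*
 * validate_xmit_xfrm() is called from the core transmit path for
 * packets that carry a secpath. It performs the ESP encapsulation in
 * software where needed, segmenting GSO packets first if they were
 * rerouted away from the offloading device.
 */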
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	/* Do not bypass packets already waiting on the per-cpu xfrm backlog. */
	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	xo->flags |= XFRM_XMIT;

	if (skb_is_gso(skb)) {
		struct net_device *dev = skb->dev;

		if (unlikely(x->xso.dev != dev)) {
			struct sk_buff *segs;

			/* Packet got rerouted, fixup features and segment it. */
			esp_features = esp_features & ~(NETIF_F_HW_ESP
							| NETIF_F_GSO_ESP);

			segs = skb_gso_segment(skb, esp_features);
			if (IS_ERR(segs)) {
				kfree_skb(skb);
				atomic_long_inc(&dev->tx_dropped);
				return NULL;
			} else {
				consume_skb(skb);
				skb = segs;
			}
		}
	}

	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
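
/*
 * xfrm_dev_state_add() binds an xfrm state to an offloading netdevice.
 * It is reached from the netlink configuration path when user space
 * requests hardware offload for a state, and falls back to a route
 * lookup when no usable interface index was given.
 */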
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	xso->num_exthdrs = 1;
	xso->flags = xuo->flags;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->num_exthdrs = 0;
		xso->flags = 0;
		xso->dev = NULL;
		dev_put(dev);

		if (err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
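
/*
 * xfrm_dev_offload_ok() decides whether a packet may take the hardware
 * offload path: the state must have an offload-capable type, the packet
 * must leave through the offloading device, and it must fit the MTU
 * (or be GSO and still valid after segmentation).
 */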
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
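
/*
 * xfrm_dev_resume() retransmits an skb after an asynchronous operation
 * completed. If the queue is frozen or stopped, the packet is parked on
 * the per-cpu xfrm backlog and the TX softirq is raised to retry later.
 */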
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);
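
/*
 * xfrm_dev_backlog() runs from the NET_TX softirq and flushes the
 * per-cpu backlog of packets that could not be transmitted earlier.
 * The queue is spliced to a private list so the lock is held briefly.
 */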
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif
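
/*
 * xfrm_api_check() rejects inconsistent feature/ops combinations: ESP
 * TX checksum offload requires ESP offload, and a device advertising
 * ESP offload must provide the mandatory xfrmdev_ops callbacks.
 */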
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}
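
/*
 * Netdevice notifier: validate offload capabilities on register and
 * feature changes, and flush offloaded states when a device goes down
 * or unregisters.
 */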
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}
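
/*
 * Illustrative sketch (not part of this file): how a NIC driver wires
 * itself up to this offload API. The "foo" names and callback bodies
 * are hypothetical; the xfrmdev_ops structure, its callbacks and the
 * NETIF_F_HW_ESP feature flag are the interfaces checked by
 * xfrm_api_check() and used by xfrm_dev_state_add() above.
 *
 *	static int foo_xdo_dev_state_add(struct xfrm_state *x)
 *	{
 *		// hypothetical: program SPI, keys and direction into HW
 *		return 0;
 *	}
 *
 *	static void foo_xdo_dev_state_delete(struct xfrm_state *x)
 *	{
 *		// hypothetical: remove the SA from HW
 *	}
 *
 *	static bool foo_xdo_dev_offload_ok(struct sk_buff *skb,
 *					   struct xfrm_state *x)
 *	{
 *		return true;	// no extra per-packet restrictions
 *	}
 *
 *	static const struct xfrmdev_ops foo_xfrmdev_ops = {
 *		.xdo_dev_state_add	= foo_xdo_dev_state_add,
 *		.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
 *		.xdo_dev_offload_ok	= foo_xdo_dev_offload_ok,
 *	};
 *
 *	// in the driver's setup path:
 *	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *	netdev->features |= NETIF_F_HW_ESP;
 *	netdev->hw_features |= NETIF_F_HW_ESP;
 */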