/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>
25 #ifdef CONFIG_XFRM_OFFLOAD
26 struct sk_buff
*validate_xmit_xfrm(struct sk_buff
*skb
, netdev_features_t features
, bool *again
)
32 struct softnet_data
*sd
;
33 netdev_features_t esp_features
= features
;
34 struct xfrm_offload
*xo
= xfrm_offload(skb
);
39 if (!(features
& NETIF_F_HW_ESP
))
40 esp_features
= features
& ~(NETIF_F_SG
| NETIF_F_CSUM_MASK
);
42 x
= skb
->sp
->xvec
[skb
->sp
->len
- 1];
43 if (xo
->flags
& XFRM_GRO
|| x
->xso
.flags
& XFRM_OFFLOAD_INBOUND
)
46 local_irq_save(flags
);
47 sd
= this_cpu_ptr(&softnet_data
);
48 err
= !skb_queue_empty(&sd
->xfrm_backlog
);
49 local_irq_restore(flags
);
56 if (skb_is_gso(skb
)) {
57 struct net_device
*dev
= skb
->dev
;
59 if (unlikely(!x
->xso
.offload_handle
|| (x
->xso
.dev
!= dev
))) {
62 /* Packet got rerouted, fixup features and segment it. */
63 esp_features
= esp_features
& ~(NETIF_F_HW_ESP
66 segs
= skb_gso_segment(skb
, esp_features
);
69 atomic_long_inc(&dev
->tx_dropped
);
79 x
->outer_mode
->xmit(x
, skb
);
81 xo
->flags
|= XFRM_DEV_RESUME
;
83 err
= x
->type_offload
->xmit(x
, skb
, esp_features
);
85 if (err
== -EINPROGRESS
)
88 XFRM_INC_STATS(xs_net(x
), LINUX_MIB_XFRMOUTSTATEPROTOERROR
);
93 skb_push(skb
, skb
->data
- skb_mac_header(skb
));
101 struct sk_buff
*nskb
= skb2
->next
;
104 xo
= xfrm_offload(skb2
);
105 xo
->flags
|= XFRM_DEV_RESUME
;
107 x
->outer_mode
->xmit(x
, skb2
);
109 err
= x
->type_offload
->xmit(x
, skb2
, esp_features
);
112 } else if (err
!= -EINPROGRESS
) {
113 XFRM_INC_STATS(xs_net(x
), LINUX_MIB_XFRMOUTSTATEPROTOERROR
);
115 kfree_skb_list(skb2
);
127 skb_push(skb2
, skb2
->data
- skb_mac_header(skb2
));
135 EXPORT_SYMBOL_GPL(validate_xmit_xfrm
);
137 int xfrm_dev_state_add(struct net
*net
, struct xfrm_state
*x
,
138 struct xfrm_user_offload
*xuo
)
141 struct dst_entry
*dst
;
142 struct net_device
*dev
;
143 struct xfrm_state_offload
*xso
= &x
->xso
;
144 xfrm_address_t
*saddr
;
145 xfrm_address_t
*daddr
;
147 if (!x
->type_offload
)
150 /* We don't yet support UDP encapsulation and TFC padding. */
151 if (x
->encap
|| x
->tfcpad
)
154 dev
= dev_get_by_index(net
, xuo
->ifindex
);
156 if (!(xuo
->flags
& XFRM_OFFLOAD_INBOUND
)) {
157 saddr
= &x
->props
.saddr
;
158 daddr
= &x
->id
.daddr
;
160 saddr
= &x
->id
.daddr
;
161 daddr
= &x
->props
.saddr
;
164 dst
= __xfrm_dst_lookup(net
, 0, 0, saddr
, daddr
,
165 x
->props
.family
, x
->props
.output_mark
);
175 if (!dev
->xfrmdev_ops
|| !dev
->xfrmdev_ops
->xdo_dev_state_add
) {
181 if (x
->props
.flags
& XFRM_STATE_ESN
&&
182 !dev
->xfrmdev_ops
->xdo_dev_state_advance_esn
) {
189 xso
->num_exthdrs
= 1;
190 xso
->flags
= xuo
->flags
;
192 err
= dev
->xfrmdev_ops
->xdo_dev_state_add(x
);
201 EXPORT_SYMBOL_GPL(xfrm_dev_state_add
);
203 bool xfrm_dev_offload_ok(struct sk_buff
*skb
, struct xfrm_state
*x
)
206 struct dst_entry
*dst
= skb_dst(skb
);
207 struct xfrm_dst
*xdst
= (struct xfrm_dst
*)dst
;
208 struct net_device
*dev
= x
->xso
.dev
;
210 if (!x
->type_offload
|| x
->encap
)
213 if ((!dev
|| (x
->xso
.offload_handle
&& (dev
== xfrm_dst_path(dst
)->dev
))) &&
214 (!xdst
->child
->xfrm
&& x
->type
->get_mtu
)) {
215 mtu
= x
->type
->get_mtu(x
, xdst
->child_mtu_cached
);
220 if (skb_is_gso(skb
) && skb_gso_validate_network_len(skb
, mtu
))
227 if (dev
&& dev
->xfrmdev_ops
&& dev
->xfrmdev_ops
->xdo_dev_offload_ok
)
228 return x
->xso
.dev
->xfrmdev_ops
->xdo_dev_offload_ok(skb
, x
);
232 EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok
);
234 void xfrm_dev_resume(struct sk_buff
*skb
)
236 struct net_device
*dev
= skb
->dev
;
237 int ret
= NETDEV_TX_BUSY
;
238 struct netdev_queue
*txq
;
239 struct softnet_data
*sd
;
243 txq
= netdev_pick_tx(dev
, skb
, NULL
);
245 HARD_TX_LOCK(dev
, txq
, smp_processor_id());
246 if (!netif_xmit_frozen_or_stopped(txq
))
247 skb
= dev_hard_start_xmit(skb
, dev
, txq
, &ret
);
248 HARD_TX_UNLOCK(dev
, txq
);
250 if (!dev_xmit_complete(ret
)) {
251 local_irq_save(flags
);
252 sd
= this_cpu_ptr(&softnet_data
);
253 skb_queue_tail(&sd
->xfrm_backlog
, skb
);
254 raise_softirq_irqoff(NET_TX_SOFTIRQ
);
255 local_irq_restore(flags
);
259 EXPORT_SYMBOL_GPL(xfrm_dev_resume
);
261 void xfrm_dev_backlog(struct softnet_data
*sd
)
263 struct sk_buff_head
*xfrm_backlog
= &sd
->xfrm_backlog
;
264 struct sk_buff_head list
;
267 if (skb_queue_empty(xfrm_backlog
))
270 __skb_queue_head_init(&list
);
272 spin_lock(&xfrm_backlog
->lock
);
273 skb_queue_splice_init(xfrm_backlog
, &list
);
274 spin_unlock(&xfrm_backlog
->lock
);
276 while (!skb_queue_empty(&list
)) {
277 skb
= __skb_dequeue(&list
);
278 xfrm_dev_resume(skb
);
284 static int xfrm_api_check(struct net_device
*dev
)
286 #ifdef CONFIG_XFRM_OFFLOAD
287 if ((dev
->features
& NETIF_F_HW_ESP_TX_CSUM
) &&
288 !(dev
->features
& NETIF_F_HW_ESP
))
291 if ((dev
->features
& NETIF_F_HW_ESP
) &&
292 (!(dev
->xfrmdev_ops
&&
293 dev
->xfrmdev_ops
->xdo_dev_state_add
&&
294 dev
->xfrmdev_ops
->xdo_dev_state_delete
)))
297 if (dev
->features
& (NETIF_F_HW_ESP
| NETIF_F_HW_ESP_TX_CSUM
))
304 static int xfrm_dev_register(struct net_device
*dev
)
306 return xfrm_api_check(dev
);
309 static int xfrm_dev_unregister(struct net_device
*dev
)
311 xfrm_policy_cache_flush();
315 static int xfrm_dev_feat_change(struct net_device
*dev
)
317 return xfrm_api_check(dev
);
320 static int xfrm_dev_down(struct net_device
*dev
)
322 if (dev
->features
& NETIF_F_HW_ESP
)
323 xfrm_dev_state_flush(dev_net(dev
), dev
, true);
325 xfrm_policy_cache_flush();
329 static int xfrm_dev_event(struct notifier_block
*this, unsigned long event
, void *ptr
)
331 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
334 case NETDEV_REGISTER
:
335 return xfrm_dev_register(dev
);
337 case NETDEV_UNREGISTER
:
338 return xfrm_dev_unregister(dev
);
340 case NETDEV_FEAT_CHANGE
:
341 return xfrm_dev_feat_change(dev
);
344 return xfrm_dev_down(dev
);
349 static struct notifier_block xfrm_dev_notifier
= {
350 .notifier_call
= xfrm_dev_event
,
353 void __init
xfrm_dev_init(void)
355 register_netdevice_notifier(&xfrm_dev_notifier
);