// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV6 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>
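
/* Find the offset of the nexthdr byte that announces ESP (or UDP for
 * ESP-in-UDP encapsulation), walking any IPv6 extension headers in
 * between; returns 0 if no ESP header lies within the first nhlen bytes.
 */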
static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
	int off = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr;

	/* ESP or ESPINUDP */
	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP ||
		   ipv6_hdr->nexthdr == NEXTHDR_UDP))
		return offsetof(struct ipv6hdr, nexthdr);

	while (off < nhlen) {
		exthdr = (void *)ipv6_hdr + off;
		if (exthdr->nexthdr == NEXTHDR_ESP)
			return off;

		off += ipv6_optlen(exthdr);
	}

	return 0;
}
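
/* GRO receive handler for ESP: parse the SPI, look up (or reuse) the
 * receiving xfrm state and feed the packet synchronously into
 * xfrm_input(). ERR_PTR(-EINPROGRESS) tells GRO the skb was consumed;
 * on failure the packet is flagged for flush back to the normal path.
 */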
static struct sk_buff *esp6_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	int encap_type = 0;
	__be32 seq;
	__be32 spi;
	int nhoff;

	if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
		encap_type = UDP_ENCAP_ESPINUDP;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark,
					    (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
					    spi, IPPROTO_ESP, AF_INET6);

		if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
			/* non-offload path will record the error and audit log */
			xfrm_state_put(x);
			x = NULL;
		}

		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
	if (!nhoff)
		goto out;

	IP6CB(skb)->nhoff = nhoff;
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}
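
/* Prepend the ESP header for GSO output: record the inner protocol in
 * the offload state, stamp the SPI and the low 32 bits of the output
 * sequence number.
 */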
static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	u8 proto = iph->nexthdr;

	skb_push(skb, -skb_network_offset(skb));

	if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) {
		__be16 frag;

		ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
	}

	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}
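
/* Tunnel mode: segment as a plain IPv4 or IPv6 frame according to the
 * inner family of the state.
 */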
static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET ? htons(ETH_P_IP)
						      : htons(ETH_P_IPV6);

	return skb_eth_gso_segment(skb, features, type);
}
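
/* Transport mode: step over the ESP header and let the inner transport
 * protocol's offload callbacks do the segmentation.
 */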
static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet6_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
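
/* BEET mode: fix up the transport header for the address-family change
 * and an optional pseudo header before delegating to the inner protocol.
 */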
static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		skb->transport_header -=
			(sizeof(struct ipv6hdr) - sizeof(struct iphdr));

		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}

		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
	}

	if (proto == IPPROTO_IPIP)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
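
/* Dispatch GSO segmentation according to the outer encapsulation mode. */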
static struct sk_buff *xfrm6_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm6_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm6_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}
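
/* GSO callback for ESP: validate the SPI against the state, strip the
 * ESP header and IV, and mask out features the device cannot provide
 * before segmenting per outer mode.
 */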
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm6_outer_mode_gso_segment(x, skb, esp_features);
}
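
/* Finish receive processing once the device (or GRO crypto path) has
 * already performed the ESP transformation.
 */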
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp6_input_done2(skb, 0);
}
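
/* Transmit path for ESP offload: compute pad and trailer sizes, fix up
 * sequence numbers for GSO segments, then either hand the skb to the
 * device as-is or fall back to software ESP via esp6_output_tail().
 */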
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_t features)
{
	int len;
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esp.esph = ip_esp_hdr(skb);
	esp.esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esp.esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;

	ipv6_hdr(skb)->payload_len = htons(len);

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	return 0;
}
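
/* Registration glue: GRO/GSO callbacks for IPPROTO_ESP and the xfrm
 * type-offload hooks used above.
 */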
static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};

static const struct xfrm_type_offload esp6_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};

static int __init esp6_offload_init(void)
{
	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6);
	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");