// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

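/* GRO receive handler for ESP: parse the SPI, look up the receiving SA and
 * hand the packet to xfrm_input() so decryption continues on the GRO path.
 */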
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	int encap_type = 0;
	__be32 seq;
	__be32 spi;

	if (!pskb_pull(skb, offset))
		return NULL;

	if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark,
					    (xfrm_address_t *)&ip_hdr(skb)->daddr,
					    spi, IPPROTO_ESP, AF_INET);

		if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) {
			/* non-offload path will record the error and audit log */
			xfrm_state_put(x);
			x = NULL;
		}

		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	if (NAPI_GRO_CB(skb)->proto == IPPROTO_UDP)
		encap_type = UDP_ENCAP_ESPINUDP;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, encap_type);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

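/* Build the outer ESP header (SPI and low 32 bits of the output sequence
 * number) in front of the payload during GSO encapsulation.
 */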
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

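/* Tunnel mode: segment the inner IPv4/IPv6 frame with the generic
 * Ethernet GSO helper.
 */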
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__be16 type = x->inner_mode.family == AF_INET6 ? htons(ETH_P_IPV6)
						       : htons(ETH_P_IP);

	return skb_eth_gso_segment(skb, features, type);
}

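/* Transport mode: advance the transport header past the ESP header and
 * segment via the inner protocol's inet offload callback.
 */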
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

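/* BEET mode: skip any pseudo header (or IPv6 extension headers), set the
 * matching gso_type and segment via the inner protocol's offload callback.
 */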
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	if (proto == IPPROTO_IPV6)
		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

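/* Dispatch segmentation according to the SA's outer encapsulation mode. */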
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

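/* GSO callback for ESP: validate the SA, strip the ESP header and IV, mask
 * out features the device cannot offload, then segment by outer mode.
 */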
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~(NETIF_F_CSUM_MASK |
					    NETIF_F_SCTP_CRC);

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

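/* Finish receive processing once the crypto layer (or hardware) is done. */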
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

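/* Transmit-side ESP offload: build the ESP header, trailer and sequence
 * numbers; fall back to software crypto when the device cannot handle
 * this packet.
 */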
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;
	int encap_type = 0;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (x->encap)
		encap_type = x->encap->encap_type;

	if (!hw_offload || !skb_is_gso(skb) || (hw_offload && encap_type == UDP_ENCAP_ESPINUDP)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	if (xo->seq.low < seq)
		xo->seq.hi++;

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	if (hw_offload && encap_type == UDP_ENCAP_ESPINUDP) {
		/* In the XFRM stack, the encapsulation protocol is set to iphdr->protocol by
		 * setting *skb_mac_header(skb) (see esp_output_udp_encap()) where skb->mac_header
		 * points to iphdr->protocol (see xfrm4_tunnel_encap_add()).
		 * However, in esp_xmit(), skb->mac_header doesn't point to iphdr->protocol.
		 * Therefore, the protocol field needs to be corrected.
		 */
		ip_hdr(skb)->protocol = IPPROTO_UDP;

		esph->seq_no = htonl(seq);
	}

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload) {
		if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
			return -ENOMEM;

		xo = xfrm_offload(skb);
		if (!xo)
			return -EINVAL;

		xo->flags |= XFRM_XMIT;
		return 0;
	}

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	if (skb_needs_linearize(skb, skb->dev->features) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	return 0;
}

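/* Offload registration: GRO/GSO callbacks for IPPROTO_ESP and the xfrm
 * type offload operations used by the ESP fast path.
 */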
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");