/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
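
/* GRO receive handler for ESP: pull the outer headers, parse the SPI and
 * sequence number, attach a secpath with the matching xfrm state when the
 * device has not already done the crypto, and hand the packet to
 * xfrm_input() in GRO mode. Returning ERR_PTR(-EINPROGRESS) tells the GRO
 * layer that the skb has been consumed.
 */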
static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;
        int err;

        if (!pskb_pull(skb, offset))
                return NULL;

        if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                err = secpath_set(skb);
                if (err)
                        goto out;

                if (skb->sp->len == XFRM_MAX_DEPTH)
                        goto out;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
                        goto out;

                skb->sp->xvec[skb->sp->len++] = x;
                skb->sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo) {
                        xfrm_state_put(x);
                        goto out;
                }
        }

        xo->flags |= XFRM_GRO;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error. */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}
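
/* GSO encapsulation callback: mark the packet as ESP, write a preliminary
 * ESP header (SPI and the low 32 bits of the output sequence number) in
 * front of the payload, and remember the inner protocol so esp_xmit() can
 * place it in the ESP trailer.
 */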
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}
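
/* GSO segmentation callback: validate the ESP GSO metadata against the
 * state, pull the ESP header and IV, mask out features the device cannot
 * honour for this packet, and let the outer mode do the segmentation.
 */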
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        x = skb->sp->xvec[skb->sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
            (x->xso.dev != skb->dev))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~NETIF_F_CSUM_MASK;

        xo->flags |= XFRM_GSO_SEGMENT;

        return x->outer_mode->gso_segment(x, skb, esp_features);
}
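
/* Input tail handler: runs after the device (or the GRO fallback) has done
 * the ESP crypto. If the crypto was not done in hardware, any checksum
 * value is meaningless after decryption, so it is discarded before
 * completing receive processing in esp_input_done2().
 */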
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}
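
/* Transmit handler: compute the ESP padding and trailer lengths, fill in
 * the SPI and sequence numbers (advancing the sequence counter per GSO
 * segment), fix up the outer IPv4 length and checksum, and fall back to
 * software encryption via esp_output_head()/esp_output_tail() when the
 * device cannot do the crypto itself.
 */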
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
            (x->xso.dev != skb->dev)) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));

        if (hw_offload)
                return 0;

        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        return 0;
}
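
/* GRO/GSO callbacks registered against IPPROTO_ESP in the inet offload table. */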
static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .description    = "ESP4 OFFLOAD",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .input_tail     = esp_input_tail,
        .xmit           = esp_xmit,
        .encap          = esp4_gso_encap,
};
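
/* Register the ESP offload type with the xfrm layer and the GRO/GSO
 * handlers with the inet offload table.
 */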
static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}
static void __exit esp4_offload_exit(void)
{
        if (xfrm_unregister_type_offload(&esp_type_offload, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type offload\n", __func__);

        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);