net/ipv6/esp6_offload.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV6 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>
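
/*
 * Walk the IPv6 extension header chain (at most @nhlen bytes) and return
 * the offset of the nexthdr byte that announces ESP, or 0 if no ESP
 * header is found.
 */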
static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
        int off = sizeof(struct ipv6hdr);
        struct ipv6_opt_hdr *exthdr;

        if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
                return offsetof(struct ipv6hdr, nexthdr);

        while (off < nhlen) {
                exthdr = (void *)ipv6_hdr + off;
                if (exthdr->nexthdr == NEXTHDR_ESP)
                        return off;

                off += ipv6_optlen(exthdr);
        }

        return 0;
}
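
/*
 * GRO receive handler for ESP: look up the xfrm state that matches the
 * SPI, attach it to the sec_path and hand the packet to xfrm_input()
 * for asynchronous processing. Returning ERR_PTR(-EINPROGRESS) tells
 * the GRO layer that the packet has been consumed.
 */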
static struct sk_buff *esp6_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;
        int nhoff;
        int err;

        if (!pskb_pull(skb, offset))
                return NULL;

        if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                struct sec_path *sp = secpath_set(skb);

                if (!sp)
                        goto out;

                if (sp->len == XFRM_MAX_DEPTH)
                        goto out_reset;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET6);
                if (!x)
                        goto out_reset;

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;
                sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo)
                        goto out_reset;
        }

        xo->flags |= XFRM_GRO;

        nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
        if (!nhoff)
                goto out;

        IP6CB(skb)->nhoff = nhoff;
        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error. */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out_reset:
        secpath_reset(skb);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}
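
/*
 * Prepare the ESP header for GSO output: remember the inner protocol in
 * xo->proto, mark the outer next header as ESP and fill in the SPI and
 * the low 32 bits of the output sequence number.
 */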
static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct ipv6hdr *iph = ipv6_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        u8 proto = iph->nexthdr;

        skb_push(skb, -skb_network_offset(skb));

        if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) {
                __be16 frag;

                ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
        }

        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}

static struct sk_buff *xfrm6_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
{
        __skb_push(skb, skb->mac_len);
        return skb_mac_gso_segment(skb, features);
}

static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
                                                   struct sk_buff *skb,
                                                   netdev_features_t features)
{
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb->transport_header += x->props.header_len;
        ops = rcu_dereference(inet6_offloads[xo->proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

static struct sk_buff *xfrm6_beet_gso_segment(struct xfrm_state *x,
                                              struct sk_buff *skb,
                                              netdev_features_t features)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        u8 proto = xo->proto;

        skb->transport_header += x->props.header_len;

        if (x->sel.family != AF_INET6) {
                skb->transport_header -=
                        (sizeof(struct ipv6hdr) - sizeof(struct iphdr));

                if (proto == IPPROTO_BEETPH) {
                        struct ip_beet_phdr *ph =
                                (struct ip_beet_phdr *)skb->data;

                        skb->transport_header += ph->hdrlen * 8;
                        proto = ph->nexthdr;
                } else {
                        skb->transport_header -= IPV4_BEET_PHMAXLEN;
                }

                if (proto == IPPROTO_TCP)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
        } else {
                __be16 frag;

                skb->transport_header +=
                        ipv6_skip_exthdr(skb, 0, &proto, &frag);
        }

        __skb_pull(skb, skb_transport_offset(skb));
        ops = rcu_dereference(inet6_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}
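
/*
 * Dispatch GSO segmentation to the helper that matches the outer mode
 * of the xfrm state (tunnel, transport or BEET).
 */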
static struct sk_buff *xfrm6_outer_mode_gso_segment(struct xfrm_state *x,
                                                    struct sk_buff *skb,
                                                    netdev_features_t features)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                return xfrm6_tunnel_gso_segment(x, skb, features);
        case XFRM_MODE_TRANSPORT:
                return xfrm6_transport_gso_segment(x, skb, features);
        case XFRM_MODE_BEET:
                return xfrm6_beet_gso_segment(x, skb, features);
        }

        return ERR_PTR(-EOPNOTSUPP);
}
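
/*
 * GSO callback for ESP: validate the offload state, pull the ESP header
 * and IV, downgrade the feature set when the packet is not going to be
 * handled by its offloading device, and segment according to the outer
 * mode of the state.
 */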
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~NETIF_F_CSUM_MASK;

        xo->flags |= XFRM_GSO_SEGMENT;

        return xfrm6_outer_mode_gso_segment(x, skb, esp_features);
}
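
/*
 * input_tail callback: finish ESP receive processing through
 * esp6_input_done2() once the crypto work has been done elsewhere
 * (by the device or by the GRO codepath).
 */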
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp6_input_done2(skb, 0);
}
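
/*
 * xmit callback: compute the ESP padding and trailer sizes, fill in the
 * ESP header and sequence numbers, and either leave the crypto to the
 * offloading device or fall back to the software path via
 * esp6_output_tail().
 */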
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
        int len;
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
                esp.nfrags = esp6_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esp.esph = ip_esp_hdr(skb);
        esp.esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esp.esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

        len = skb->len - sizeof(struct ipv6hdr);
        if (len > IPV6_MAXPLEN)
                len = 0;

        ipv6_hdr(skb)->payload_len = htons(len);

        if (hw_offload)
                return 0;

        err = esp6_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        return 0;
}
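
/* Offload hooks registered with the IPv6 GRO/GSO and xfrm frameworks. */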
static const struct net_offload esp6_offload = {
        .callbacks = {
                .gro_receive = esp6_gro_receive,
                .gso_segment = esp6_gso_segment,
        },
};

static const struct xfrm_type_offload esp6_type_offload = {
        .description = "ESP6 OFFLOAD",
        .owner = THIS_MODULE,
        .proto = IPPROTO_ESP,
        .input_tail = esp6_input_tail,
        .xmit = esp6_xmit,
        .encap = esp6_gso_encap,
};

static int __init esp6_offload_init(void)
{
        if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
        xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6);
        inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");