net/ipv6/esp6_offload.c

/*
 * IPV6 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <linux/icmpv6.h>
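
/* Return the offset of the byte that carries the ESP next-header value:
 * the nexthdr field of the IPv6 header itself, or the nexthdr field of
 * the last extension header preceding the ESP header. Returns 0 if no
 * ESP header is found within the first nhlen bytes.
 */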
static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
{
	int off = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr;

	if (likely(ipv6_hdr->nexthdr == NEXTHDR_ESP))
		return offsetof(struct ipv6hdr, nexthdr);

	while (off < nhlen) {
		exthdr = (void *)ipv6_hdr + off;
		if (exthdr->nexthdr == NEXTHDR_ESP)
			return off;

		off += ipv6_optlen(exthdr);
	}

	return 0;
}
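
/* GRO receive handler for ESP: parse the SPI and sequence number, attach
 * the matching xfrm state to the secpath if needed and feed the packet
 * into xfrm_input(). Returning ERR_PTR(-EINPROGRESS) tells the GRO layer
 * that the skb has been consumed.
 */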
static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int nhoff;
	int err;

	skb_pull(skb, offset);

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		err = secpath_set(skb);
		if (err)
			goto out;

		if (skb->sp->len == XFRM_MAX_DEPTH)
			goto out;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET6);
		if (!x)
			goto out;

		skb->sp->xvec[skb->sp->len++] = x;
		skb->sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out;
		}
	}

	xo->flags |= XFRM_GRO;

	nhoff = esp6_nexthdr_esp_offset(ipv6_hdr(skb), offset);
	if (!nhoff)
		goto out;

	IP6CB(skb)->nhoff = nhoff;
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}
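
/* GSO encapsulation hook: install the outer ESP header (SPI and low-order
 * sequence number) in front of the payload and remember the inner protocol
 * for the segmentation path.
 */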
static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->nexthdr;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}
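
/* Segment an oversized ESP skb. Without NETIF_F_HW_ESP the SG and checksum
 * features are masked out and CRYPTO_FALLBACK is set, so each segment is
 * encrypted in software; every segment gets its own sequence number before
 * being handed to the type offload xmit hook.
 */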
static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	__u32 seq;
	int err = 0;
	struct sk_buff *skb2;
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		goto out;

	seq = xo->seq.low;

	x = skb->sp->xvec[skb->sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		goto out;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		goto out;

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	segs = x->outer_mode->gso_segment(x, skb, esp_features);
	if (IS_ERR_OR_NULL(segs))
		goto out;

	__skb_pull(skb, skb->data - skb_mac_header(skb));

	skb2 = segs;
	do {
		struct sk_buff *nskb = skb2->next;

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_GSO_SEGMENT;
		xo->seq.low = seq;
		xo->seq.hi = xfrm_replay_seqhi(x, seq);

		if (!(features & NETIF_F_HW_ESP))
			xo->flags |= CRYPTO_FALLBACK;

		x->outer_mode->xmit(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (err) {
			kfree_skb_list(segs);
			return ERR_PTR(err);
		}

		if (!skb_is_gso(skb2))
			seq++;
		else
			seq += skb_shinfo(skb2)->gso_segs;

		skb_push(skb2, skb2->mac_len);
		skb2 = nskb;
	} while (skb2);

out:
	return segs;
}
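
/* Finish ESP input for a packet whose payload was already decrypted, by
 * hardware or by the GRO codepath, and hand it to esp6_input_done2().
 */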
static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp6_input_done2(skb, 0);
}
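
/* Transmit hook: build the ESP header and trailer. If the device can do
 * the crypto (NETIF_F_HW_ESP with a matching offload handle and device),
 * stop after the header; otherwise fall back to software encryption via
 * esp6_output_tail().
 */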
static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
	    (x->xso.dev != skb->dev)) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp6_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	esph = ip_esp_hdr(skb);
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(xo->seq.low);
	} else {
		int len;

		len = skb->len - sizeof(struct ipv6hdr);
		if (len > IPV6_MAXPLEN)
			len = 0;

		ipv6_hdr(skb)->payload_len = htons(len);
	}

	if (hw_offload)
		return 0;

	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));

	err = esp6_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}
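
/* Registration glue: the GRO/GSO callbacks for IPPROTO_ESP and the xfrm
 * type offload that ties them to the ESP state machine.
 */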
static const struct net_offload esp6_offload = {
	.callbacks = {
		.gro_receive = esp6_gro_receive,
		.gso_segment = esp6_gso_segment,
	},
};

static const struct xfrm_type_offload esp6_type_offload = {
	.description	= "ESP6 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp6_input_tail,
	.xmit		= esp6_xmit,
	.encap		= esp6_gso_encap,
};
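
/* Register the xfrm type offload first, then the inet6 protocol offload;
 * unwind in the reverse order on exit.
 */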
static int __init esp6_offload_init(void)
{
	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
}

static void __exit esp6_offload_exit(void)
{
	if (xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type offload\n", __func__);

	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
}

module_init(esp6_offload_init);
module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);