net/ipv4/esp4_offload.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>
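
/*
 * GRO receive handler for ESP. Parses the SPI and sequence number,
 * attaches (or reuses) a secpath with the matching xfrm state and hands
 * the packet to xfrm_input(). Returning ERR_PTR(-EINPROGRESS) signals
 * the GRO layer that the skb has been consumed by the xfrm code.
 */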
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out_reset;
		}
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}
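
/*
 * Prepend the ESP header at GSO time. The inner protocol is saved in
 * the offload context and the outer protocol byte (addressed through
 * the mac header pointer here) is set to ESP.
 */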
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}
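
/*
 * In tunnel mode the encapsulated packet is self-contained, so
 * segmentation can simply be delegated to the MAC layer GSO code.
 */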
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__skb_push(skb, skb->mac_len);
	return skb_mac_gso_segment(skb, features);
}
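
/*
 * In transport mode the inner transport header follows the ESP header;
 * skip it and dispatch to the inner protocol's gso_segment callback.
 */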
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
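
/*
 * BEET mode may carry an optional pseudo header (IPPROTO_BEETPH) whose
 * length must be skipped before handing the payload to the inner
 * protocol's segmentation callback.
 */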
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	int proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (proto == IPPROTO_BEETPH) {
		struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data;

		skb->transport_header += ph->hdrlen * 8;
		proto = ph->nexthdr;
	} else if (x->sel.family != AF_INET6) {
		skb->transport_header -= IPV4_BEET_PHMAXLEN;
	} else if (proto == IPPROTO_TCP) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}
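
/*
 * Dispatch segmentation on the outer encapsulation mode; anything other
 * than tunnel, transport or BEET is unsupported for GSO.
 */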
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}
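
/*
 * Main GSO entry point for ESP. Validates the SPI against the state on
 * the secpath, strips the ESP header and IV, and masks out offload
 * features the device cannot provide (a software fallback loses SG and
 * checksum offload) before segmenting via the outer mode handler.
 */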
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~NETIF_F_CSUM_MASK;

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}
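
/*
 * Finish receive processing for a packet whose ESP handling was done
 * (fully or partially) by hardware; ends in the common
 * esp_input_done2() tail shared with the non-offloaded ESP input path.
 */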
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}
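
/*
 * Transmit path for offloaded ESP. If the skb's device owns the state
 * (x->xso.dev) and advertises NETIF_F_HW_ESP, only the ESP header is
 * built here and encryption is left to the NIC; otherwise
 * CRYPTO_FALLBACK is set and the packet is encrypted in software via
 * esp_output_tail(). States gain a hardware device through the xfrm
 * offload API, e.g. "ip xfrm state add ... offload dev eth0 dir out"
 * from iproute2.
 */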
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload)
		return 0;

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}
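
/*
 * Registration tables: the net_offload hooks into the inet GRO/GSO
 * machinery, the xfrm_type_offload into the xfrm state framework.
 */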
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.description = "ESP4 OFFLOAD",
	.owner = THIS_MODULE,
	.proto = IPPROTO_ESP,
	.input_tail = esp_input_tail,
	.xmit = esp_xmit,
	.encap = esp4_gso_encap,
};
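
/*
 * Module init/exit: register the offload handlers with the xfrm core
 * and with the inet offload list, and remove both again on unload.
 */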
static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);