/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>
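
/* For reference, the base GRE header parsed below is, roughly as defined in
 * the kernel's shared tunnel headers:
 *
 *	struct gre_base_hdr {
 *		__be16 flags;
 *		__be16 protocol;
 *	};
 *
 * Each optional field implied by a set flag (checksum word plus reserved
 * bytes, key, sequence number) occupies one GRE_HEADER_SECTION (4 bytes)
 * right after it, which is why the GRO code below grows the header length
 * in GRE_HEADER_SECTION steps.
 */
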
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;
	bool need_csum, ufo, gso_partial;

	if (!skb->encapsulation)
		goto out;

	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

	ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags based
	 * on the fact that we will be computing our checksum in software.
	 */
	if (ufo) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__sum16 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__sum16 *)(greh + 1);

		if (gso_partial) {
			unsigned int partial_adj;

			/* Adjust checksum to account for the fact that
			 * the partial checksum is based on actual size
			 * whereas headers should be based on MSS size.
			 */
			partial_adj = skb->len + skb_headroom(skb) -
				      SKB_GSO_CB(skb)->data_offset -
				      skb_shinfo(skb)->gso_size;
			*pcsum = ~csum_fold((__force __wsum)htonl(partial_adj));
		} else {
			*pcsum = 0;
		}

		*(pcsum + 1) = 0;
		*pcsum = gso_make_checksum(skb, 0);
	} while ((skb = skb->next));
out:
	return segs;
}
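
/* How packets reach gre_gso_segment(): a rough sketch rather than the code
 * of any particular tunnel driver.  The transmit path is expected to leave
 * the outer headers (including the GRE header) in place, set
 * skb->encapsulation, record the payload type in skb->inner_protocol and
 * mark the skb with SKB_GSO_GRE or SKB_GSO_GRE_CSUM.  The stack then lands
 * here via the gre_offload callbacks registered at the bottom of this file,
 * and skb_mac_gso_segment() above takes care of the inner packet.
 */
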
static struct sk_buff **gre_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

	if (NAPI_GRO_CB(skb)->encap_mark)
		goto out;

	NAPI_GRO_CB(skb)->encap_mark = 1;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and K (key), C (csum) flags. Note that
	 * although the support for the S (seq#) flag can be added easily
	 * for GRO, this is problematic for GSO hence can not be enabled
	 * here because a GRO pkt may end up in the forwarding path, thus
	 * requiring GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	/* We can only support GRE_CSUM if we can track the location of
	 * the GRE header.  In the case of FOU/GUE we cannot because the
	 * outer UDP header displaces the GRE header leaving us in a state
	 * of limbo.
	 */
	if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou)
		goto out;

	type = greh->protocol;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out_unlock;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out_unlock;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0,
					     null_compute_pseudo);
	}
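
	/* The GRE checksum validated above covers the GRE header and payload
	 * only and, unlike TCP or UDP, involves no pseudo-header, hence the
	 * null_compute_pseudo helper passed to the conversion.
	 */
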
	for (p = *head; p; p = p->next) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" includes:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

	skb_gro_pull(skb, grehlen);

	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
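
/* gre_gro_complete() runs when GRO flushes a merged chain: it marks the skb
 * as GRE-encapsulated and tags it SKB_GSO_GRE so the aggregate can be
 * re-segmented by gre_gso_segment() if it ends up being forwarded, then
 * hands off to the inner protocol's gro_complete callback.
 */
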
static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	unsigned int grehlen = sizeof(*greh);
	int err = -ENOENT;
	__be16 type;

	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

	type = greh->protocol;
	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	rcu_read_unlock();

	skb_set_inner_mac_header(skb, nhoff + grehlen);

	return err;
}

static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};
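
/* One callback table serves GRE over both IPv4 and IPv6: the init code below
 * registers it under IPPROTO_GRE with inet_add_offload() and, when IPv6 is
 * enabled, inet6_add_offload().
 */
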
static int __init gre_offload_init(void)
{
	int err;

	err = inet_add_offload(&gre_offload, IPPROTO_GRE);
#if IS_ENABLED(CONFIG_IPV6)
	if (err)
		return err;

	err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
	if (err)
		inet_del_offload(&gre_offload, IPPROTO_GRE);
#endif

	return err;
}
device_initcall(gre_offload_init);