gro: Allow tunnel stacking in the case of FOU/GUE
net/ipv4/tcp_offload.c
/*
 *      IPV4 GSO/GRO offload support
 *      Linux INET implementation
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>
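
/* Propagate a software TX timestamp request from the original GSO skb to
 * the one resulting segment that contains byte ts_seq: walk the segment
 * list, advancing seq by mss per segment, and tag the first segment whose
 * byte range covers ts_seq.
 */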
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
                           unsigned int seq, unsigned int mss)
{
        while (skb) {
                if (before(ts_seq, seq + mss)) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
                        skb_shinfo(skb)->tskey = ts_seq;
                        return;
                }

                skb = skb->next;
                seq += mss;
        }
}
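
/* IPv4 entry point for TCP segmentation. Validates that a full TCP header
 * is present, and rebuilds the pseudo-header checksum if the stack has not
 * already left the skb in CHECKSUM_PARTIAL state (e.g. packets from
 * untrusted or virtual sources), before deferring to the
 * address-family-independent tcp_gso_segment().
 */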
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                return ERR_PTR(-EINVAL);

        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                const struct iphdr *iph = ip_hdr(skb);
                struct tcphdr *th = tcp_hdr(skb);

                /* Set up checksum pseudo header, usually expect stack to
                 * have done this already.
                 */

                th->check = 0;
                skb->ip_summed = CHECKSUM_PARTIAL;
                __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
        }

        return tcp_gso_segment(skb, features);
}
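
/* Address-family-independent segmentation, shared with TCPv6. Splits a
 * large TCP skb into mss-sized segments and patches up each segment's
 * sequence number, flags (FIN/PSH kept only on the last segment, CWR only
 * on the first) and checksum incrementally, avoiding a full payload
 * checksum recomputation.
 */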
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int sum_truesize = 0;
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
        __be32 delta;
        unsigned int oldlen;
        unsigned int mss;
        struct sk_buff *gso_skb = skb;
        __sum16 newcheck;
        bool ooo_okay, copy_destructor;

        th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        if (!pskb_may_pull(skb, thlen))
                goto out;
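
        /* Stash the one's complement of the original TCP length: the
         * pseudo-header checksum in th->check covers that length, so
         * adding oldlen plus a segment's new length later performs the
         * standard RFC 1624 incremental checksum update.
         */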
        oldlen = (u16)~skb->len;
        __skb_pull(skb, thlen);

        mss = tcp_skb_mss(skb);
        if (unlikely(skb->len <= mss))
                goto out;
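
        /* Reached when segmentation was requested only because the packet
         * came from an untrusted source (SKB_GSO_DODGY): sanity-check
         * gso_type, recompute gso_segs, and return NULL so the caller
         * transmits the original skb unchanged.
         */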
        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;

                if (unlikely(type &
                             ~(SKB_GSO_TCPV4 |
                               SKB_GSO_DODGY |
                               SKB_GSO_TCP_ECN |
                               SKB_GSO_TCPV6 |
                               SKB_GSO_GRE |
                               SKB_GSO_GRE_CSUM |
                               SKB_GSO_IPIP |
                               SKB_GSO_SIT |
                               SKB_GSO_UDP_TUNNEL |
                               SKB_GSO_UDP_TUNNEL_CSUM |
                               SKB_GSO_TUNNEL_REMCSUM |
                               0) ||
                             !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
                        goto out;

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
        skb->ooo_okay = 0;
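
        /* skb_segment() does the actual split into mss-sized skbs;
         * everything below only fixes up the TCP headers of the
         * resulting list.
         */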
        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;

        /* Only first segment might have ooo_okay set */
        segs->ooo_okay = ooo_okay;

        delta = htonl(oldlen + (thlen + mss));

        skb = segs;
        th = tcp_hdr(skb);
        seq = ntohl(th->seq);

        if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
                tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
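
        /* All segments except possibly the last are exactly thlen + mss
         * bytes, so the length-adjusted pseudo-header checksum is the same
         * for all of them; fold it once and reuse it in the loop below.
         */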
        newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
                                               (__force u32)delta));

        do {
                th->fin = th->psh = 0;
                th->check = newcheck;

                if (skb->ip_summed != CHECKSUM_PARTIAL)
                        th->check = gso_make_checksum(skb, ~th->check);

                seq += mss;
                if (copy_destructor) {
                        skb->destructor = gso_skb->destructor;
                        skb->sk = gso_skb->sk;
                        sum_truesize += skb->truesize;
                }
                skb = skb->next;
                th = tcp_hdr(skb);

                th->seq = htonl(seq);
                th->cwr = 0;
        } while (skb->next);

        /* Following permits TCP Small Queues to work well with GSO :
         * The callback to TCP stack will be called at the time last frag
         * is freed at TX completion, and not right now when gso_skb
         * is freed by GSO engine
         */
        if (copy_destructor) {
                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
                sum_truesize += skb->truesize;
                atomic_add(sum_truesize - gso_skb->truesize,
                           &skb->sk->sk_wmem_alloc);
        }
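
        /* The last segment may be shorter than mss, so its checksum is
         * adjusted separately using its actual header + payload length.
         */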
        delta = htonl(oldlen + (skb_tail_pointer(skb) -
                                skb_transport_header(skb)) +
                      skb->data_len);
        th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                                (__force u32)delta));
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                th->check = gso_make_checksum(skb, ~th->check);
out:
        return segs;
}
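
/* Address-family-independent GRO receive, shared with TCPv6. Tries to
 * coalesce the incoming segment with a held packet of the same flow; any
 * mismatch in ports, flags, ack number, TCP options or expected sequence
 * number forces a flush instead.
 */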
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        struct sk_buff **pp = NULL;
        struct sk_buff *p;
        struct tcphdr *th;
        struct tcphdr *th2;
        unsigned int len;
        unsigned int thlen;
        __be32 flags;
        unsigned int mss = 1;
        unsigned int hlen;
        unsigned int off;
        int flush = 1;
        int i;
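
        /* Locate the TCP header in the GRO header area: the fast path
         * reads from the pre-mapped first fragment, the slow path pulls
         * the bytes into the linear area. The lookup runs twice because
         * the full header length is only known after reading doff.
         */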
        off = skb_gro_offset(skb);
        hlen = off + sizeof(*th);
        th = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        hlen = off + thlen;
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        skb_gro_pull(skb, thlen);

        len = skb_gro_len(skb);
        flags = tcp_flag_word(th);
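
        /* th->source and th->dest are adjacent 16-bit fields, so a single
         * 32-bit XOR compares both ports against each held packet at
         * once; a mismatch clears same_flow for that packet.
         */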
        for (; (p = *head); head = &p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                th2 = tcp_hdr(p);

                if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                goto found;
        }

        goto out_check_final;

found:
        /* Include the IP ID check below from the inner most IP hdr */
        flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
        flush |= (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
        flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
        for (i = sizeof(*th); i < thlen; i += 4)
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);
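
        /* Coalesce only full-sized, in-order segments: anything larger
         * than the flow's MSS, or not starting exactly at the next
         * expected sequence number, forces a flush.
         */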
        mss = tcp_skb_mss(p);

        flush |= (len - 1) >= mss;
        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

        if (flush || skb_gro_receive(head, skb)) {
                mss = 1;
                goto out_check_final;
        }

        p = *head;
        th2 = tcp_hdr(p);
        tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
        flush = len < mss;
        flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
                                        TCP_FLAG_RST | TCP_FLAG_SYN |
                                        TCP_FLAG_FIN));

        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
                pp = head;

out:
        NAPI_GRO_CB(skb)->flush |= (flush != 0);

        return pp;
}
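
/* Finalize a merged skb before it is handed to the stack: mark it
 * CHECKSUM_PARTIAL with the checksum field location, set gso_segs to the
 * number of merged segments, and flag ECN if CWR was observed.
 */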
int tcp_gro_complete(struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);

        skb->csum_start = (unsigned char *)th - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

        if (th->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

        return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
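
/* IPv4 GRO receive: validate the TCP checksum against the IPv4
 * pseudo-header (unless the packet is already doomed to be flushed), then
 * defer to the common tcp_gro_receive().
 */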
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (!NAPI_GRO_CB(skb)->flush &&
            skb_gro_checksum_validate(skb, IPPROTO_TCP,
                                      inet_gro_compute_pseudo)) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        return tcp_gro_receive(head, skb);
}
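
/* IPv4 GRO completion: skb->len now covers the whole merged packet, so
 * recompute the pseudo-header checksum over the new length and mark the
 * skb SKB_GSO_TCPV4 so it can be resegmented if forwarded.
 */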
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

        return tcp_gro_complete(skb);
}
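
/* Offload callbacks registered for IPPROTO_TCP; the GRO/GSO core
 * dispatches to these through the inet_offloads array.
 */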
static const struct net_offload tcpv4_offload = {
        .callbacks = {
                .gso_segment    =       tcp4_gso_segment,
                .gro_receive    =       tcp4_gro_receive,
                .gro_complete   =       tcp4_gro_complete,
        },
};
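
/* Boot-time registration hook; in kernels of this vintage it is invoked
 * from ipv4_offload_init() in net/ipv4/af_inet.c (an assumption worth
 * verifying against the tree at hand).
 */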
int __init tcpv4_offload_init(void)
{
        return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}