/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

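	/* Note: ~len is stored before the TCP header is pulled so the
	 * checksum can later be patched incrementally in one's-complement
	 * arithmetic (RFC 1624 style) rather than recomputed per segment.
	 */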
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = tcp_skb_mss(skb);
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_MPLS |
			       SKB_GSO_UDP_TUNNEL |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

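	/* Remember whether the original skb carried the TCP small-queues
	 * destructor (tcp_wfree); if so, the socket's truesize accounting
	 * must be spread across the segments built below.
	 */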
	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

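	/* Incremental checksum update: every segment except the last is
	 * thlen + mss bytes long, and oldlen holds ~(original length), so
	 * their sum is the per-segment length delta in one's-complement
	 * form, ready to be folded into the existing check field.
	 */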
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	do {
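		/* FIN and PSH are only meaningful on the final segment;
		 * the loop advances to skb->next before the last iteration
		 * would clear them, so the last segment keeps the original
		 * flags.
		 */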
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			/* {tcp|sock}_wfree() use exact truesize accounting:
			 * sum(skb->truesize) MUST be exactly gso_skb->truesize.
			 * So we account mss bytes of 'true size' for each segment.
			 * The last segment will contain the remaining.
			 */
			skb->truesize = mss;
			gso_skb->truesize -= mss;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* Following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be invoked when the last
	 * fragment is freed at TX completion, not right now when gso_skb
	 * is freed by the GSO engine.
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		swap(gso_skb->truesize, skb->truesize);
	}

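	/* The last segment carries whatever payload is left over, so its
	 * length usually differs from mss; patch its checksum separately
	 * using its actual header-to-tail length plus paged data.
	 */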
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));
out:
	return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);

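/* GRO receive: try to coalesce an incoming TCP segment with a packet
 * already on the GRO hold list for the same flow. A non-NULL return
 * points into the hold list at a packet the caller should flush.
 */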
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

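	/* Walk the hold list looking for a packet of the same flow.
	 * The source and destination ports are adjacent 16-bit fields,
	 * so a single 32-bit XOR compares both at once.
	 */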
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = tcp_skb_mss(p);

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
EXPORT_SYMBOL(tcp_gro_receive);

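/* GRO completion: fix up the coalesced super-packet before it is handed
 * to the stack: point checksum offload at the TCP check field, record
 * the merged segment count in gso_segs, and preserve ECN marking.
 */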
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

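/* IPv4 wrapper around tcp_gro_receive(): verify (or compute) the TCP
 * checksum against the IPv4 pseudo-header before any merging is
 * attempted, so corrupted packets are never coalesced.
 */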
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_send_check	=	tcp_v4_gso_send_check,
		.gso_segment	=	tcp_tso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}
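/* Registration note: inet_add_offload() hooks this net_offload into the
 * inet_offloads array slot for IPPROTO_TCP, so GSO/GRO dispatch finds
 * these callbacks by protocol number. tcpv4_offload_init() is expected
 * to run once during IPv4 stack bring-up (inet_init() in mainline).
 */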