// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>
#include <linux/skbuff_ref.h>

#include "dev.h"

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all CPUs
 *	that are in the middle of receiving packets will see the new offload
 *	handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets with non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	if (unlikely(p->len + len >= netif_get_gro_max_size(p->dev, p) ||
		     NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		/* skb's payload is entirely in its fragments: move them over. */
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		/* The linear data sits in a page fragment: steal the head page. */
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
{
	if (unlikely(p->len + skb->len >= 65536))
		return -E2BIG;

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;

	skb_pull(skb, skb_gro_offset(skb));

	NAPI_GRO_CB(p)->last = skb;
	NAPI_GRO_CB(p)->count++;
	p->data_len += skb->len;

	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	p->truesize += skb->truesize;
	p->len += skb->len;

	NAPI_GRO_CB(skb)->same_flow = 1;

	return 0;
}
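
/*
 * Example (editorial sketch): a flow matched by a protocol's gro_receive
 * callback is grown either by frag merging (skb_gro_receive()) or, when
 * fraglist based GRO is in use, by chaining whole skbs with
 * skb_gro_receive_list().  'use_fraglist' is a hypothetical stand-in for
 * logic such as NAPI_GRO_CB(p)->is_flist.
 */
#if 0
static int example_flow_append(struct sk_buff *p, struct sk_buff *skb,
			       bool use_fraglist)
{
	if (use_fraglist)
		return skb_gro_receive_list(p, skb);

	return skb_gro_receive(p, skb);
}
#endif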

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age;
 * the youngest packets are at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0;
		 * otherwise we are already on some slower path,
		 * so either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo;
	const skb_frag_t *frag0;
	unsigned int headlen;

	NAPI_GRO_CB(skb)->network_offset = 0;
	NAPI_GRO_CB(skb)->data_offset = 0;
	headlen = skb_headlen(skb);
	NAPI_GRO_CB(skb)->frag0 = skb->data;
	NAPI_GRO_CB(skb)->frag0_len = headlen;
	if (headlen)
		return;

	pinfo = skb_shinfo(skb);
	frag0 = &pinfo->frags[0];

	if (pinfo->nr_frags && skb_frag_page(frag0) &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(),
 * so we copy the Ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(!skb_gro_may_pull(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;

		if (NAPI_GRO_CB(skb)->frag0 != skb->data)
			gro_pull_from_frag0(skb, hlen);

		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);