// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/bitfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
#include <linux/iov_iter.h>

#include <net/protocol.h>
#include <net/checksum.h>
#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/mptcp.h>
#include <net/page_pool/helpers.h>
#include <net/dropreason.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/textsearch.h>

#include "netmem_priv.h"
#include "sock_destructor.h"
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif

#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)

/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
 * size, and we can differentiate heads from skb_small_head_cache
 * vs system slabs by looking at their size (skb_end_offset()).
 */
#define SKB_SMALL_HEAD_CACHE_SIZE					\
	(is_power_of_2(SKB_SMALL_HEAD_SIZE) ?			\
		(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) :	\
		SKB_SMALL_HEAD_SIZE)

#define SKB_SMALL_HEAD_HEADROOM						\
	SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
 * iov_iter_bvec(). These static asserts ensure the cast is valid as long as
 * the skb_frag_t and bio_vec field layouts keep matching.
 */
static_assert(offsetof(struct bio_vec, bv_page) ==
	      offsetof(skb_frag_t, netmem));
static_assert(sizeof_field(struct bio_vec, bv_page) ==
	      sizeof_field(skb_frag_t, netmem));

static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
static_assert(sizeof_field(struct bio_vec, bv_len) ==
	      sizeof_field(skb_frag_t, len));

static_assert(offsetof(struct bio_vec, bv_offset) ==
	      offsetof(skb_frag_t, offset));
static_assert(sizeof_field(struct bio_vec, bv_offset) ==
	      sizeof_field(skb_frag_t, offset));
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
	[SKB_CONSUMED] = "CONSUMED",
	DEFINE_DROP_REASON(FN, FN)
};

static const struct drop_reason_list drop_reasons_core = {
	.reasons = drop_reasons,
	.n_reasons = ARRAY_SIZE(drop_reasons),
};

const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);
/**
 * drop_reasons_register_subsys - register another drop reason subsystem
 * @subsys: the subsystem to register, must not be the core
 * @list: the list of drop reasons within the subsystem, must point to
 *	a statically initialized list
 */
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
				  const struct drop_reason_list *list)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	/* must point to statically allocated memory, so INIT is OK */
	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);
/**
 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
 * @subsys: the subsystem to remove, must not be the core
 *
 * Note: This will synchronize_rcu() to ensure no users when it returns.
 */
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);
/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}
#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

#if PAGE_SIZE == SZ_4K

#define NAPI_HAS_SMALL_PAGE_FRAG	1
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	((nc).pfmemalloc)

/* specialized page frag allocator using a single order 0 page
 * and slicing it into 1K sized fragment. Constrained to systems
 * with a very limited amount of 1K fragments fitting a single
 * page - to avoid excessive truesize underestimation
 */

struct page_frag_1k {
	void *va;
	u16 offset;
	bool pfmemalloc;
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
{
	struct page *page;
	int offset;

	offset = nc->offset - SZ_1K;
	if (likely(offset >= 0))
		goto use_frag;

	page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
	if (!page)
		return NULL;

	nc->va = page_address(page);
	nc->pfmemalloc = page_is_pfmemalloc(page);
	offset = PAGE_SIZE - SZ_1K;
	page_ref_add(page, offset / SZ_1K);

use_frag:
	nc->offset = offset;
	return nc->va + offset;
}
#else

/* the small page is actually unused in this build; add dummy helpers
 * to please the compiler and avoid later preprocessor's conditionals
 */
#define NAPI_HAS_SMALL_PAGE_FRAG	0
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	false

struct page_frag_1k {
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
{
	return NULL;
}

#endif
struct napi_alloc_cache {
	local_lock_t bh_lock;
	struct page_frag_cache page;
	struct page_frag_1k page_small;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};
/* Double check that napi_get_frags() allocates skbs with
 * skb->head being backed by slab, not a page fragment.
 * This is to make sure bug fixed in 3226b158e67c
 * ("net: avoid 32 x truesize under-estimation for tiny skbs")
 * does not accidentally come back.
 */
void napi_get_frags_check(struct napi_struct *napi)
{
	struct sk_buff *skb;

	local_bh_disable();
	skb = napi_get_frags(napi);
	WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
	napi_free_frags(napi);
	local_bh_enable();
}
void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	data = __page_frag_alloc_align(&nc->page, fragsz,
				       GFP_ATOMIC | __GFP_NOWARN, align_mask);
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);

	return data;
}
EXPORT_SYMBOL(__napi_alloc_frag_align);
void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	void *data;

	if (in_hardirq() || irqs_disabled()) {
		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);

		fragsz = SKB_DATA_ALIGN(fragsz);
		data = __page_frag_alloc_align(nc, fragsz,
					       GFP_ATOMIC | __GFP_NOWARN,
					       align_mask);
	} else {
		local_bh_disable();
		data = __napi_alloc_frag_align(fragsz, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);
static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	if (unlikely(!nc->skb_count)) {
		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
						      GFP_ATOMIC | __GFP_NOWARN,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
		if (unlikely(!nc->skb_count)) {
			local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
			return NULL;
		}
	}

	skb = nc->skb_cache[--nc->skb_count];
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
	kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));

	return skb;
}
static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
					 unsigned int size)
{
	struct skb_shared_info *shinfo;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb_set_end_offset(skb, size);
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;
	skb->alloc_cpu = raw_smp_processor_id();
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}
static inline void *__slab_build_skb(struct sk_buff *skb, void *data,
				     unsigned int *size)
{
	void *resized;

	/* Must find the allocation size (and grow it to match). */
	*size = ksize(data);
	/* krealloc() will immediately return "data" when
	 * "ksize(data)" is requested: it is the existing upper
	 * bounds. As a result, GFP_ATOMIC will be ignored. Note
	 * that this "new" pointer needs to be passed back to the
	 * caller for use so the __alloc_size hinting will be
	 * tracked correctly.
	 */
	resized = krealloc(data, *size, GFP_ATOMIC);
	WARN_ON_ONCE(resized != data);
	return resized;
}

/* build_skb() variant which can operate on slab buffers.
 * Note that this should be used sparingly as slab buffers
 * cannot be combined efficiently by GRO!
 */
struct sk_buff *slab_build_skb(void *data)
{
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	data = __slab_build_skb(skb, data, &size);
	__finalize_skb_around(skb, data, size);

	return skb;
}
EXPORT_SYMBOL(slab_build_skb);
/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	unsigned int size = frag_size;

	/* frag_size == 0 is considered deprecated now. Callers
	 * using slab buffer should use slab_build_skb() instead.
	 */
	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
		data = __slab_build_skb(skb, data, &size);

	__finalize_skb_around(skb, data, size);
}
/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data (must not be 0)
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated from the page
 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
 * allocation is deprecated, and callers should use slab_build_skb()
 * instead.)
 *
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contains data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}
/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head_frag and skb->pfmemalloc
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (likely(skb && frag_size)) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provide by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);
/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}
/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);
/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	bool ret_pfmemalloc = false;
	size_t obj_size;
	void *obj;

	obj_size = SKB_HEAD_ALIGN(*size);
	if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
	    !(flags & KMALLOC_NOT_NORMAL_BITS)) {
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
				flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
				node);
		*size = SKB_SMALL_HEAD_CACHE_SIZE;
		if (obj || !(gfp_pfmemalloc_allowed(flags)))
			goto out;
		/* Try again but now we are using pfmemalloc reserves */
		ret_pfmemalloc = true;
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
		goto out;
	}

	obj_size = kmalloc_size_roundup(obj_size);
	/* The following cast might truncate high-order bits of obj_size, this
	 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
	 */
	*size = (unsigned int)obj_size;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(obj_size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(obj_size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}
/* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	bool pfmemalloc;
	u8 *data;

	cache = (flags & SKB_ALLOC_FCLONE)
		? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc_size_roundup() might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	prefetchw(data + SKB_WITH_OVERHEAD(size));

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, size);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len = SKB_HEAD_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);
	} else {
		local_bh_disable();
		local_lock_nested_bh(&napi_alloc_cache.bh_lock);

		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = page_frag_cache_is_pfmemalloc(nc);

		local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
/**
 *	napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
{
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN;
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 * When the small frag allocator is available, prefer it over kmalloc
	 * for small fragments
	 */
	if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	nc = this_cpu_ptr(&napi_alloc_cache);
	if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
		/* we are artificially inflating the allocation size, but
		 * that is not as bad as it may look like, as:
		 * - 'len' less than GRO_MAX_HEAD makes little sense
		 * - On most systems, larger 'len' values lead to fragment
		 *   size above 512 bytes
		 * - kmalloc would use the kmalloc-1k slab for such values
		 * - Builds with smaller GRO_MAX_HEAD will very likely do
		 *   little networking, as that implies no WiFi and no
		 *   tunnels support, and 32 bits arches.
		 */
		len = SZ_1K;

		data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
		pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
	} else {
		len = SKB_HEAD_ALIGN(len);

		data = page_frag_alloc(&nc->page, len, gfp_mask);
		pfmemalloc = page_frag_cache_is_pfmemalloc(&nc->page);
	}
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);

	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(napi_alloc_skb);
void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
			    int off, int size, unsigned int truesize)
{
	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_fill_netmem_desc(skb, i, netmem, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag_netmem);
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static bool is_pp_netmem(netmem_ref netmem)
{
	return (netmem_get_pp_magic(netmem) & ~0x3UL) == PP_SIGNATURE;
}
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
		    unsigned int headroom)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
	u32 size, truesize, len, max_head_size, off;
	struct sk_buff *skb = *pskb, *nskb;
	int err, i, head_off;
	void *data;

	/* XDP does not support fraglist so we need to linearize
	 * the skb.
	 */
	if (skb_has_frag_list(skb))
		return -EOPNOTSUPP;

	max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
	if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
		return -ENOMEM;

	size = min_t(u32, skb->len, max_head_size);
	truesize = SKB_HEAD_ALIGN(size) + headroom;
	data = page_pool_dev_alloc_va(pool, &truesize);
	if (!data)
		return -ENOMEM;

	nskb = napi_build_skb(data, truesize);
	if (!nskb) {
		page_pool_free_va(pool, data, true);
		return -ENOMEM;
	}

	skb_reserve(nskb, headroom);
	skb_copy_header(nskb, skb);
	skb_mark_for_recycle(nskb);

	err = skb_copy_bits(skb, 0, nskb->data, size);
	if (err) {
		consume_skb(nskb);
		return err;
	}
	skb_put(nskb, size);

	head_off = skb_headroom(nskb) - skb_headroom(skb);
	skb_headers_offset_update(nskb, head_off);

	off = size;
	len = skb->len - off;
	for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
		struct page *page;
		u32 page_off;

		size = min_t(u32, len, PAGE_SIZE);
		truesize = size;

		page = page_pool_dev_alloc(pool, &page_off, &truesize);
		if (!page) {
			consume_skb(nskb);
			return -ENOMEM;
		}

		skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
		err = skb_copy_bits(skb, off, page_address(page) + page_off,
				    size);
		if (err) {
			consume_skb(nskb);
			return err;
		}

		len -= size;
		off += size;
	}

	consume_skb(skb);
	*pskb = nskb;

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
EXPORT_SYMBOL(skb_pp_cow_data);
int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
			 struct bpf_prog *prog)
{
	if (!prog->aux->xdp_has_frags)
		return -EINVAL;

	return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
}
EXPORT_SYMBOL(skb_cow_data_for_xdp);
#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(netmem_ref netmem)
{
	netmem = netmem_compound_head(netmem);

	/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
	 * in order to preserve any existing bits, such as bit 0 for the
	 * head page of compound page and bit 1 for pfmemalloc page, so
	 * mask those bits for freeing side when doing below checking,
	 * and page_is_pfmemalloc() is checked in __page_pool_put_page()
	 * to avoid recycling the pfmemalloc page.
	 */
	if (unlikely(!is_pp_netmem(netmem)))
		return false;

	page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, false);

	return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif
static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
}
/**
 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
 * @skb:	page pool aware skb
 *
 * Increase the fragment reference count (pp_ref_count) of a skb. This is
 * intended to gain fragment references only for page pool aware skbs,
 * i.e. when skb->pp_recycle is true, and not for fragments in a
 * non-pp-recycling skb. It has a fallback to increase references on normal
 * pages, as page pool aware skbs may also have normal page fragments.
 */
static int skb_pp_frag_ref(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	netmem_ref head_netmem;
	int i;

	if (!skb->pp_recycle)
		return -EINVAL;

	shinfo = skb_shinfo(skb);

	for (i = 0; i < shinfo->nr_frags; i++) {
		head_netmem = netmem_compound_head(shinfo->frags[i].netmem);
		if (likely(is_pp_netmem(head_netmem)))
			page_pool_ref_netmem(head_netmem);
		else
			page_ref_inc(netmem_to_page(head_netmem));
	}
	return 0;
}
static void skb_kfree_head(void *head, unsigned int end_offset)
{
	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
		kmem_cache_free(net_hotdata.skb_small_head_cache, head);
	else
		kfree(head);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		skb_kfree_head(head, skb_end_offset(skb));
	}
}
static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (!skb_data_unref(skb, shinfo))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list_reason(shinfo->frag_list, reason);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and it's
	 * dataref set to 0, which will trigger the recycling
	 */
	skb->pp_recycle = 0;
}
/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(net_hotdata.skbuff_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}
void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		DEBUG_NET_WARN_ON_ONCE(in_hardirq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}
/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb, reason);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
static __always_inline
bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb,
			  enum skb_drop_reason reason)
{
	if (unlikely(!skb_unref(skb)))
		return false;

	DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET ||
			       u32_get_bits(reason,
					    SKB_DROP_REASON_SUBSYS_MASK) >=
				SKB_DROP_REASON_SUBSYS_NUM);

	if (reason == SKB_CONSUMED)
		trace_consume_skb(skb, __builtin_return_address(0));
	else
		trace_kfree_skb(skb, __builtin_return_address(0), reason, sk);
	return true;
}

/**
 *	sk_skb_reason_drop - free an sk_buff with special reason
 *	@sk: the socket to receive @skb, or NULL if not applicable
 *	@skb: buffer to free
 *	@reason: reason why this skb is dropped
 *
 *	Drop a reference to the buffer and free it if the usage count has hit
 *	zero. Meanwhile, pass the receiving socket and drop reason to
 *	'kfree_skb' tracepoint.
 */
void __fix_address
sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
{
	if (__sk_skb_reason_drop(sk, skb, reason))
		__kfree_skb(skb);
}
EXPORT_SYMBOL(sk_skb_reason_drop);
#define KFREE_SKB_BULK_SIZE	16

struct skb_free_array {
	unsigned int skb_count;
	void *skb_array[KFREE_SKB_BULK_SIZE];
};

static void kfree_skb_add_bulk(struct sk_buff *skb,
			       struct skb_free_array *sa,
			       enum skb_drop_reason reason)
{
	/* if SKB is a clone, don't handle this case */
	if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb, reason);
	sa->skb_array[sa->skb_count++] = skb;

	if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
		kmem_cache_free_bulk(net_hotdata.skbuff_cache, KFREE_SKB_BULK_SIZE,
				     sa->skb_array);
		sa->skb_count = 0;
	}
}

void __fix_address
kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
{
	struct skb_free_array sa;

	sa.skb_count = 0;

	while (segs) {
		struct sk_buff *next = segs->next;

		if (__sk_skb_reason_drop(NULL, segs, reason)) {
			skb_poison_list(segs);
			kfree_skb_add_bulk(segs, &sa, reason);
		}

		segs = next;
	}

	if (sa.skb_count)
		kmem_cache_free_bulk(net_hotdata.skbuff_cache, sa.skb_count, sa.skb_array);
}
EXPORT_SYMBOL(kfree_skb_list_reason);
/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) mac_len=%u net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x start=%u offset=%u ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n"
	       "priority=0x%x mark=0x%x alloc_cpu=%u vlan_all=0x%x\n"
	       "encapsulation=%d inner(proto=0x%04x, mac=%u, net=%u, trans=%u)\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->mac_len,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed,
	       skb->csum_complete_sw, skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif,
	       skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all,
	       skb->encapsulation, skb->inner_protocol, skb->inner_mac_header,
	       skb->inner_network_header, skb->inner_transport_header);

	if (dev)
		printk("%sdev name=%s feat=%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		if (skb_frag_is_net_iov(frag)) {
			printk("%sskb frag %d: not readable\n", level, i);
			len -= skb_frag_size(frag);
			if (!len)
				break;
			continue;
		}

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag:     ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);
/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb) {
		skb_zcopy_downgrade_managed(skb);
		skb_zcopy_clear(skb, true);
	}
}
EXPORT_SYMBOL(skb_tx_error);
#ifdef CONFIG_TRACEPOINTS
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero
 *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
 *	is being dropped after a failure and notes that
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif
/**
 *	__consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Alike consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have been already dropped
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb, __builtin_return_address(0));
	skb_release_data(skb, SKB_CONSUMED);
	kfree_skbmem(skb);
}
static void napi_skb_cache_put(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 i;

	if (!kasan_mempool_poison_object(skb))
		return;

	local_lock_nested_bh(&napi_alloc_cache.bh_lock);
	nc->skb_cache[nc->skb_count++] = skb;

	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
			kasan_mempool_unpoison_object(nc->skb_cache[i],
						kmem_cache_size(net_hotdata.skbuff_cache));

		kmem_cache_free_bulk(net_hotdata.skbuff_cache, NAPI_SKB_CACHE_HALF,
				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
		nc->skb_count = NAPI_SKB_CACHE_HALF;
	}
	local_unlock_nested_bh(&napi_alloc_cache.bh_lock);
}
void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
{
	skb_release_all(skb, reason);
	napi_skb_cache_put(skb);
}

void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	if (unlikely(skb->slow_gro)) {
		nf_reset_ct(skb);
		skb_dst_drop(skb);
		skb_ext_put(skb);
		skb_orphan(skb);
		skb->slow_gro = 0;
	}
	napi_skb_cache_put(skb);
}
void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* Zero budget indicate non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb, __builtin_return_address(0));

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb, SKB_CONSUMED);
	napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
/* Make sure a field is contained by headers group */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) !=		\
		     offsetof(struct sk_buff, headers.field));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	/* We do not copy old->sk */
	new->dev		= old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in the headers group.
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers, &old->headers, sizeof(new->headers));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
	CHECK_SKB_FIELD(alloc_cpu);
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}
/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	C(pp_recycle);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}
/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst, SKB_CONSUMED);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg, rlim;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	rlim = rlimit(RLIMIT_MEMLOCK);
	if (rlim == RLIM_INFINITY)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
	max_pg = rlim >> PAGE_SHIFT;
	user = mmp->user ? : current_user();

	old_pg = atomic_long_read(&user->locked_vm);
	do {
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg));

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info_msgzc *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->ubuf.ops = &msg_zerocopy_ubuf_ops;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
	refcount_set(&uarg->ubuf.refcnt, 1);
	sock_hold(sk);

	return &uarg->ubuf;
}

static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
				       struct ubuf_info *uarg)
{
	if (uarg) {
		struct ubuf_info_msgzc *uarg_zc;
		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
		u32 bytelen, next;

		/* there might be non MSG_ZEROCOPY users */
		if (uarg->ops != &msg_zerocopy_ubuf_ops)
			return NULL;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		uarg_zc = uarg_to_msgzc(uarg);
		bytelen = uarg_zc->bytelen + size;
		if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg_zc->id + uarg_zc->len) == next) {
			if (mm_account_pinned_pages(&uarg_zc->mmp, size))
				return NULL;
			uarg_zc->len++;
			uarg_zc->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);

			/* no extra ref when appending to datagram (MSG_MORE) */
			if (sk->sk_type == SOCK_STREAM)
				net_zcopy_get(uarg);

			return uarg;
		}
	}

new_alloc:
	return msg_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);
static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}
static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	bool is_zerocopy;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;
	is_zerocopy = uarg->zerocopy;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!is_zerocopy)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}
static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg,
				  bool success)
{
	struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg);

	uarg_zc->zerocopy = uarg_zc->zerocopy & success;

	if (refcount_dec_and_test(&uarg->refcnt))
		__msg_zerocopy_callback(uarg_zc);
}

void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk;

	atomic_dec(&sk->sk_zckey);
	uarg_to_msgzc(uarg)->len--;

	if (have_uref)
		msg_zerocopy_complete(NULL, uarg, true);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);

const struct ubuf_info_ops msg_zerocopy_ubuf_ops = {
	.complete = msg_zerocopy_complete,
};
EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	int err, orig_len = skb->len;

	if (uarg->ops->link_skb) {
		err = uarg->ops->link_skb(skb, uarg);
		if (err)
			return err;
	} else {
		struct ubuf_info *orig_uarg = skb_zcopy(skb);

		/* An skb can only point to one uarg. This edge case happens
		 * when TCP appends to an skb, but zerocopy_realloc triggered
		 * a new alloc.
		 */
		if (orig_uarg && uarg != orig_uarg)
			return -EEXIST;
	}

	err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		iov_iter_revert(&msg->msg_iter, skb->len - orig_len);
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	skb_zcopy_set(skb, uarg, NULL);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
void __skb_zcopy_downgrade_managed(struct sk_buff *skb)
{
	int i;

	skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_frag_ref(skb, i);
}
EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed);
static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
			      gfp_t gfp_mask)
{
	if (skb_zcopy(orig)) {
		if (skb_zcopy(nskb)) {
			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
			if (!gfp_mask) {
				WARN_ON_ONCE(1);
				return -ENOMEM;
			}
			if (skb_uarg(nskb) == skb_uarg(orig))
				return 0;
			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
				return -EIO;
		}
		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
	}
	return 0;
}
/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	int i, order, psize, new_frags;
	u32 d_off;

	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
		return -EINVAL;

	if (!skb_frags_readable(skb))
		return -EFAULT;

	if (!num_frags)
		goto release;

	/* We might have to allocate high order pages, so compute what minimum
	 * page order is needed.
	 */
	order = 0;
	while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
		order++;
	psize = (PAGE_SIZE << order);

	new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order);
	for (i = 0; i < new_frags; i++) {
		page = alloc_pages(gfp_mask | __GFP_COMP, order);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	page = head;
	d_off = 0;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
				      p, p_off, p_len, copied) {
			u32 copy, done = 0;
			vaddr = kmap_atomic(p);

			while (done < p_len) {
				if (d_off == psize) {
					d_off = 0;
					page = (struct page *)page_private(page);
				}
				copy = min_t(u32, psize - d_off, p_len - done);
				memcpy(page_address(page) + d_off,
				       vaddr + p_off + done, copy);
				done += copy;
				d_off += copy;
			}
			kunmap_atomic(vaddr);
		}
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	/* skb frags point to kernel buffers */
	for (i = 0; i < new_frags - 1; i++) {
		__skb_fill_netmem_desc(skb, i, page_to_netmem(head), 0, psize);
		head = (struct page *)page_private(head);
	}
	__skb_fill_netmem_desc(skb, new_frags - 1, page_to_netmem(head), 0,
			       d_off);
	skb_shinfo(skb)->nr_frags = new_frags;

release:
	skb_zcopy_clear(skb, false);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);
/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    refcount_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		refcount_set(&fclones->fclone_ref, 2);
		n->fclone = SKB_FCLONE_CLONE;
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(net_hotdata.skbuff_cache, gfp_mask);
		if (!n)
			return NULL;

		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}
EXPORT_SYMBOL(skb_headers_offset_update);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
EXPORT_SYMBOL(skb_copy_header);
static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}
/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;
	unsigned int size;
	int headerlen;

	if (!skb_frags_readable(skb))
		return NULL;

	if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
		return NULL;

	headerlen = skb_headroom(skb);
	size = skb_end_offset(skb) + skb->data_len;
	n = __alloc_skb(size, gfp_mask,
			skb_alloc_rx_flag(skb), NUMA_NO_NODE);
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));

	skb_copy_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
/**
 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask) ||
		    skb_zerocopy_clone(n, skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	skb_copy_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	unsigned int osize = skb_end_offset(skb);
	unsigned int size = osize + nhead + ntail;
	long off;
	u8 *data;
	int i;

	BUG_ON(nhead < 0);

	BUG_ON(skb_shared(skb));

	skb_zcopy_downgrade_managed(skb);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;

	data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(size);

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		if (skb_zcopy(skb))
			refcount_inc(&skb_uarg(skb)->refcnt);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb, SKB_CONSUMED);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->head_frag = 0;
	skb->data    += off;

	skb_set_end_offset(skb, size);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	off           = nhead;
#endif
	skb->tail	      += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	skb_metadata_clear(skb);

	/* It is not generally safe to change skb->truesize.
	 * For the moment, we really care of rx path, or
	 * when skb is orphaned (not attached to a socket).
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;

	return 0;

nofrags:
	skb_kfree_head(data, size);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
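/* Illustrative usage sketch (not part of the original file): growing the
 * headroom with pskb_expand_head() before pushing a hypothetical 8-byte
 * encapsulation header.  Callers usually reach this through wrappers such as
 * skb_cow_head(); the key point shown here is that every cached pointer into
 * the old head is stale after the call, so the push happens afterwards.
 */
static __maybe_unused int pskb_expand_head_usage_example(struct sk_buff *skb)
{
	const unsigned int needed = 8;	/* assumed header size for the example */

	if (skb_headroom(skb) < needed &&
	    pskb_expand_head(skb, SKB_DATA_ALIGN(needed - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -ENOMEM;

	memset(skb_push(skb, needed), 0, needed);
	return 0;
}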
/* Make private copy of skb with writable head and some headroom */
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
/* Note: We plan to rework this in linux-6.4 */
int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
{
	unsigned int saved_end_offset, saved_truesize;
	struct skb_shared_info *shinfo;
	int res;

	saved_end_offset = skb_end_offset(skb);
	saved_truesize = skb->truesize;

	res = pskb_expand_head(skb, 0, 0, pri);
	if (res)
		return res;

	skb->truesize = saved_truesize;

	if (likely(skb_end_offset(skb) == saved_end_offset))
		return 0;

	/* We can not change skb->end if the original or new value
	 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head().
	 */
	if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM ||
	    skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) {
		/* We think this path should not be taken.
		 * Add a temporary trace to warn us just in case.
		 */
		pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n",
			    saved_end_offset, skb_end_offset(skb));
		WARN_ON_ONCE(1);
		return 0;
	}

	shinfo = skb_shinfo(skb);

	/* We are about to change back skb->end,
	 * we need to move skb_shinfo() to its new location.
	 */
	memmove(skb->head + saved_end_offset,
		shinfo,
		offsetof(struct skb_shared_info, frags[shinfo->nr_frags]));

	skb_set_end_offset(skb, saved_end_offset);

	return 0;
}
/**
 *	skb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@headroom: needed headroom
 *
 *	Unlike skb_realloc_headroom, this one does not allocate a new skb
 *	if possible; copies skb->sk to new skb as needed
 *	and frees original skb in case of failures.
 *
 *	It expects an increased headroom and generates a warning otherwise.
 */
struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
{
	int delta = headroom - skb_headroom(skb);
	int osize = skb_end_offset(skb);
	struct sock *sk = skb->sk;

	if (WARN_ONCE(delta <= 0,
		      "%s is expecting an increase in the headroom", __func__))
		return skb;

	delta = SKB_DATA_ALIGN(delta);
	/* pskb_expand_head() might crash, if skb is shared. */
	if (skb_shared(skb) || !is_skb_wmem(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb))
			goto fail;

		if (sk)
			skb_set_owner_w(nskb, sk);
		consume_skb(skb);
		skb = nskb;
	}
	if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
		goto fail;

	if (sk && is_skb_wmem(skb)) {
		delta = skb_end_offset(skb) - osize;
		refcount_add(delta, &sk->sk_wmem_alloc);
		skb->truesize += delta;
	}
	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_expand_head);
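/* Illustrative usage sketch (not part of the original file): skb_expand_head()
 * either returns an skb with enough headroom (possibly a different skb) or
 * frees the original and returns NULL, so the caller must always continue
 * with the return value.  The 16-byte requirement is an arbitrary example.
 */
static __maybe_unused struct sk_buff *skb_expand_head_usage_example(struct sk_buff *skb)
{
	if (skb_headroom(skb) < 16) {
		skb = skb_expand_head(skb, 16);
		if (!skb)
			return NULL;	/* original skb has already been freed */
	}
	return skb;
}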
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	int head_copy_len, head_copy_off;
	struct sk_buff *n;
	int oldheadroom;

	if (!skb_frags_readable(skb))
		return NULL;

	if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
		return NULL;

	oldheadroom = skb_headroom(skb);
	n = __alloc_skb(newheadroom + skb->len + newtailroom,
			gfp_mask, skb_alloc_rx_flag(skb),
			NUMA_NO_NODE);
	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			     skb->len + head_copy_len));

	skb_copy_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);
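/* Illustrative usage sketch (not part of the original file): skb_copy_expand()
 * combines a private copy with extra head and tail room, which suits callers
 * that must both own the data and prepend/append their own framing.  The
 * sizes below are arbitrary example values.
 */
static __maybe_unused struct sk_buff *skb_copy_expand_usage_example(const struct sk_buff *skb)
{
	struct sk_buff *n = skb_copy_expand(skb, 32, 4, GFP_ATOMIC);

	if (!n)
		return NULL;
	memset(skb_push(n, 32), 0, 32);	/* new framing in front of the copy */
	memset(skb_put(n, 4), 0, 4);	/* trailer in the reserved tailroom */
	return n;
}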
/**
 *	__skb_pad		-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *	@free_on_error: free buffer on error
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error
 *	if @free_on_error is true.
 */
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	if (free_on_error)
		kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(__skb_pad);
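/* Illustrative usage sketch (not part of the original file): padding a short
 * Ethernet frame up to the 60-byte minimum before handing it to hardware
 * that does not pad on its own.  __skb_pad() only zeroes the pad area, so the
 * caller still accounts the padding with __skb_put(); this mirrors what the
 * skb_put_padto() helper does.  ETH_ZLEN comes from <linux/if_ether.h>,
 * which is already pulled in indirectly by the existing includes.
 */
static __maybe_unused int skb_pad_usage_example(struct sk_buff *skb)
{
	unsigned int pad;

	if (skb->len >= ETH_ZLEN)
		return 0;
	pad = ETH_ZLEN - skb->len;
	/* 'true': the skb is freed for us if growing the tailroom fails */
	if (__skb_pad(skb, pad, true))
		return -ENOMEM;
	__skb_put(skb, pad);	/* include the pad bytes in skb->len */
	return 0;
}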
/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
void *skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);
/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
void *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);
/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
void *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);
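/* Illustrative usage sketch (not part of the original file): the usual life
 * cycle of the pointer-moving helpers.  skb_reserve() sets aside headroom,
 * skb_put() appends payload, skb_push() prepends a header, and skb_pull()
 * later skips that header again on the receive side.  The 4-byte "header"
 * and the fill pattern are made up for the example.
 */
static __maybe_unused void skb_put_push_pull_example(void)
{
	struct sk_buff *skb = alloc_skb(64 + 16, GFP_ATOMIC);

	if (!skb)
		return;
	skb_reserve(skb, 16);			/* headroom for the later push */
	memset(skb_put(skb, 64), 0xab, 64);	/* payload */
	memset(skb_push(skb, 4), 0, 4);		/* prepend example header */
	skb_pull(skb, 4);			/* ... and consume it again */
	kfree_skb(skb);
}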
/**
 *	skb_pull_data - remove data from the start of a buffer returning its
 *	original position.
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the original data in the buffer
 *	is returned after checking if there is enough data to pull. Once the
 *	data has been pulled future pushes will overwrite the old data.
 */
void *skb_pull_data(struct sk_buff *skb, size_t len)
{
	void *data = skb->data;

	if (skb->len < len)
		return NULL;

	__skb_pull(skb, len);

	return data;
}
EXPORT_SYMBOL(skb_pull_data);
/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
2697 /* Trims skb to length len. It can change skb pointers.
2700 int ___pskb_trim(struct sk_buff
*skb
, unsigned int len
)
2702 struct sk_buff
**fragp
;
2703 struct sk_buff
*frag
;
2704 int offset
= skb_headlen(skb
);
2705 int nfrags
= skb_shinfo(skb
)->nr_frags
;
2709 if (skb_cloned(skb
) &&
2710 unlikely((err
= pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
))))
2717 for (; i
< nfrags
; i
++) {
2718 int end
= offset
+ skb_frag_size(&skb_shinfo(skb
)->frags
[i
]);
2725 skb_frag_size_set(&skb_shinfo(skb
)->frags
[i
++], len
- offset
);
2728 skb_shinfo(skb
)->nr_frags
= i
;
2730 for (; i
< nfrags
; i
++)
2731 skb_frag_unref(skb
, i
);
2733 if (skb_has_frag_list(skb
))
2734 skb_drop_fraglist(skb
);
2738 for (fragp
= &skb_shinfo(skb
)->frag_list
; (frag
= *fragp
);
2739 fragp
= &frag
->next
) {
2740 int end
= offset
+ frag
->len
;
2742 if (skb_shared(frag
)) {
2743 struct sk_buff
*nfrag
;
2745 nfrag
= skb_clone(frag
, GFP_ATOMIC
);
2746 if (unlikely(!nfrag
))
2749 nfrag
->next
= frag
->next
;
2761 unlikely((err
= pskb_trim(frag
, len
- offset
))))
2765 skb_drop_list(&frag
->next
);
2770 if (len
> skb_headlen(skb
)) {
2771 skb
->data_len
-= skb
->len
- len
;
2776 skb_set_tail_pointer(skb
, len
);
2779 if (!skb
->sk
|| skb
->destructor
== sock_edemux
)
2783 EXPORT_SYMBOL(___pskb_trim
);
/* Note : use pskb_trim_rcsum() instead of calling this directly
 */
int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		int delta = skb->len - len;

		skb->csum = csum_block_sub(skb->csum,
					   skb_checksum(skb, len, delta, 0),
					   len);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
		int offset = skb_checksum_start_offset(skb) + skb->csum_offset;

		if (offset + sizeof(__sum16) > hdlen)
			return -EINVAL;
	}
	return __pskb_trim(skb, len);
}
EXPORT_SYMBOL(pskb_trim_rcsum_slow);
2807 * __pskb_pull_tail - advance tail of skb header
2808 * @skb: buffer to reallocate
2809 * @delta: number of bytes to advance tail
2811 * The function makes a sense only on a fragmented &sk_buff,
2812 * it expands header moving its tail forward and copying necessary
2813 * data from fragmented part.
2815 * &sk_buff MUST have reference count of 1.
2817 * Returns %NULL (and &sk_buff does not change) if pull failed
2818 * or value of new tail of skb in the case of success.
2820 * All the pointers pointing into skb header may change and must be
2821 * reloaded after call to this function.
2824 /* Moves tail of skb head forward, copying data from fragmented part,
2825 * when it is necessary.
2826 * 1. It may fail due to malloc failure.
2827 * 2. It may change skb pointers.
2829 * It is pretty complicated. Luckily, it is called only in exceptional cases.
2831 void *__pskb_pull_tail(struct sk_buff
*skb
, int delta
)
2833 /* If skb has not enough free space at tail, get new one
2834 * plus 128 bytes for future expansions. If we have enough
2835 * room at tail, reallocate without expansion only if skb is cloned.
2837 int i
, k
, eat
= (skb
->tail
+ delta
) - skb
->end
;
2839 if (!skb_frags_readable(skb
))
2842 if (eat
> 0 || skb_cloned(skb
)) {
2843 if (pskb_expand_head(skb
, 0, eat
> 0 ? eat
+ 128 : 0,
2848 BUG_ON(skb_copy_bits(skb
, skb_headlen(skb
),
2849 skb_tail_pointer(skb
), delta
));
2851 /* Optimization: no fragments, no reasons to preestimate
2852 * size of pulled pages. Superb.
2854 if (!skb_has_frag_list(skb
))
2857 /* Estimate size of pulled pages. */
2859 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2860 int size
= skb_frag_size(&skb_shinfo(skb
)->frags
[i
]);
2867 /* If we need update frag list, we are in troubles.
2868 * Certainly, it is possible to add an offset to skb data,
2869 * but taking into account that pulling is expected to
2870 * be very rare operation, it is worth to fight against
2871 * further bloating skb head and crucify ourselves here instead.
2872 * Pure masohism, indeed. 8)8)
2875 struct sk_buff
*list
= skb_shinfo(skb
)->frag_list
;
2876 struct sk_buff
*clone
= NULL
;
2877 struct sk_buff
*insp
= NULL
;
2880 if (list
->len
<= eat
) {
2881 /* Eaten as whole. */
2886 /* Eaten partially. */
2887 if (skb_is_gso(skb
) && !list
->head_frag
&&
2889 skb_shinfo(skb
)->gso_type
|= SKB_GSO_DODGY
;
2891 if (skb_shared(list
)) {
2892 /* Sucks! We need to fork list. :-( */
2893 clone
= skb_clone(list
, GFP_ATOMIC
);
2899 /* This may be pulled without
2903 if (!pskb_pull(list
, eat
)) {
2911 /* Free pulled out fragments. */
2912 while ((list
= skb_shinfo(skb
)->frag_list
) != insp
) {
2913 skb_shinfo(skb
)->frag_list
= list
->next
;
2916 /* And insert new clone at head. */
2919 skb_shinfo(skb
)->frag_list
= clone
;
2922 /* Success! Now we may commit changes to skb data. */
2927 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2928 int size
= skb_frag_size(&skb_shinfo(skb
)->frags
[i
]);
2931 skb_frag_unref(skb
, i
);
2934 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[k
];
2936 *frag
= skb_shinfo(skb
)->frags
[i
];
2938 skb_frag_off_add(frag
, eat
);
2939 skb_frag_size_sub(frag
, eat
);
2947 skb_shinfo(skb
)->nr_frags
= k
;
2951 skb
->data_len
-= delta
;
2954 skb_zcopy_clear(skb
, false);
2956 return skb_tail_pointer(skb
);
2958 EXPORT_SYMBOL(__pskb_pull_tail
);
/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	if (!skb_frags_readable(skb))
		goto fault;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u32 p_off, p_len, copied;
			struct page *p;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(f,
					      skb_frag_off(f) + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				memcpy(to + copied, vaddr + p_off, p_len);
				kunmap_atomic(vaddr);
			}

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
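/* Illustrative usage sketch (not part of the original file): skb_copy_bits()
 * is the safe way to read bytes that may live in fragments rather than in the
 * linear head.  Here eight bytes at a caller-supplied offset are copied into
 * a stack buffer; the length is arbitrary for the example.
 */
static __maybe_unused int skb_copy_bits_usage_example(const struct sk_buff *skb,
						      int offset)
{
	u8 buf[8];

	if (skb_copy_bits(skb, offset, buf, sizeof(buf)))
		return -EFAULT;		/* offset/len fell outside the skb */
	return buf[0];			/* real callers would parse the bytes */
}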
3057 * Callback from splice_to_pipe(), if we need to release some pages
3058 * at the end of the spd in case we error'ed out in filling the pipe.
3060 static void sock_spd_release(struct splice_pipe_desc
*spd
, unsigned int i
)
3062 put_page(spd
->pages
[i
]);
3065 static struct page
*linear_to_page(struct page
*page
, unsigned int *len
,
3066 unsigned int *offset
,
3069 struct page_frag
*pfrag
= sk_page_frag(sk
);
3071 if (!sk_page_frag_refill(sk
, pfrag
))
3074 *len
= min_t(unsigned int, *len
, pfrag
->size
- pfrag
->offset
);
3076 memcpy(page_address(pfrag
->page
) + pfrag
->offset
,
3077 page_address(page
) + *offset
, *len
);
3078 *offset
= pfrag
->offset
;
3079 pfrag
->offset
+= *len
;
3084 static bool spd_can_coalesce(const struct splice_pipe_desc
*spd
,
3086 unsigned int offset
)
3088 return spd
->nr_pages
&&
3089 spd
->pages
[spd
->nr_pages
- 1] == page
&&
3090 (spd
->partial
[spd
->nr_pages
- 1].offset
+
3091 spd
->partial
[spd
->nr_pages
- 1].len
== offset
);
3095 * Fill page/offset/length into spd, if it can hold more pages.
3097 static bool spd_fill_page(struct splice_pipe_desc
*spd
,
3098 struct pipe_inode_info
*pipe
, struct page
*page
,
3099 unsigned int *len
, unsigned int offset
,
3103 if (unlikely(spd
->nr_pages
== MAX_SKB_FRAGS
))
3107 page
= linear_to_page(page
, len
, &offset
, sk
);
3111 if (spd_can_coalesce(spd
, page
, offset
)) {
3112 spd
->partial
[spd
->nr_pages
- 1].len
+= *len
;
3116 spd
->pages
[spd
->nr_pages
] = page
;
3117 spd
->partial
[spd
->nr_pages
].len
= *len
;
3118 spd
->partial
[spd
->nr_pages
].offset
= offset
;
3124 static bool __splice_segment(struct page
*page
, unsigned int poff
,
3125 unsigned int plen
, unsigned int *off
,
3127 struct splice_pipe_desc
*spd
, bool linear
,
3129 struct pipe_inode_info
*pipe
)
3134 /* skip this segment if already processed */
3140 /* ignore any bits we already processed */
3146 unsigned int flen
= min(*len
, plen
);
3148 if (spd_fill_page(spd
, pipe
, page
, &flen
, poff
,
3154 } while (*len
&& plen
);
3160 * Map linear and fragment data from the skb to spd. It reports true if the
3161 * pipe is full or if we already spliced the requested length.
3163 static bool __skb_splice_bits(struct sk_buff
*skb
, struct pipe_inode_info
*pipe
,
3164 unsigned int *offset
, unsigned int *len
,
3165 struct splice_pipe_desc
*spd
, struct sock
*sk
)
3168 struct sk_buff
*iter
;
3170 /* map the linear part :
3171 * If skb->head_frag is set, this 'linear' part is backed by a
3172 * fragment, and if the head is not shared with any clones then
3173 * we can avoid a copy since we own the head portion of this page.
3175 if (__splice_segment(virt_to_page(skb
->data
),
3176 (unsigned long) skb
->data
& (PAGE_SIZE
- 1),
3179 skb_head_is_locked(skb
),
3184 * then map the fragments
3186 if (!skb_frags_readable(skb
))
3189 for (seg
= 0; seg
< skb_shinfo(skb
)->nr_frags
; seg
++) {
3190 const skb_frag_t
*f
= &skb_shinfo(skb
)->frags
[seg
];
3192 if (WARN_ON_ONCE(!skb_frag_page(f
)))
3195 if (__splice_segment(skb_frag_page(f
),
3196 skb_frag_off(f
), skb_frag_size(f
),
3197 offset
, len
, spd
, false, sk
, pipe
))
3201 skb_walk_frags(skb
, iter
) {
3202 if (*offset
>= iter
->len
) {
3203 *offset
-= iter
->len
;
3206 /* __skb_splice_bits() only fails if the output has no room
3207 * left, so no point in going over the frag_list for the error
3210 if (__skb_splice_bits(iter
, pipe
, offset
, len
, spd
, sk
))
3218 * Map data from the skb to a pipe. Should handle both the linear part,
3219 * the fragments, and the frag list.
3221 int skb_splice_bits(struct sk_buff
*skb
, struct sock
*sk
, unsigned int offset
,
3222 struct pipe_inode_info
*pipe
, unsigned int tlen
,
3225 struct partial_page partial
[MAX_SKB_FRAGS
];
3226 struct page
*pages
[MAX_SKB_FRAGS
];
3227 struct splice_pipe_desc spd
= {
3230 .nr_pages_max
= MAX_SKB_FRAGS
,
3231 .ops
= &nosteal_pipe_buf_ops
,
3232 .spd_release
= sock_spd_release
,
3236 __skb_splice_bits(skb
, pipe
, &offset
, &tlen
, &spd
, sk
);
3239 ret
= splice_to_pipe(pipe
, &spd
);
3243 EXPORT_SYMBOL_GPL(skb_splice_bits
);
3245 static int sendmsg_locked(struct sock
*sk
, struct msghdr
*msg
)
3247 struct socket
*sock
= sk
->sk_socket
;
3248 size_t size
= msg_data_left(msg
);
3253 if (!sock
->ops
->sendmsg_locked
)
3254 return sock_no_sendmsg_locked(sk
, msg
, size
);
3256 return sock
->ops
->sendmsg_locked(sk
, msg
, size
);
3259 static int sendmsg_unlocked(struct sock
*sk
, struct msghdr
*msg
)
3261 struct socket
*sock
= sk
->sk_socket
;
3265 return sock_sendmsg(sock
, msg
);
3268 typedef int (*sendmsg_func
)(struct sock
*sk
, struct msghdr
*msg
);
3269 static int __skb_send_sock(struct sock
*sk
, struct sk_buff
*skb
, int offset
,
3270 int len
, sendmsg_func sendmsg
)
3272 unsigned int orig_len
= len
;
3273 struct sk_buff
*head
= skb
;
3274 unsigned short fragidx
;
3279 /* Deal with head data */
3280 while (offset
< skb_headlen(skb
) && len
) {
3284 slen
= min_t(int, len
, skb_headlen(skb
) - offset
);
3285 kv
.iov_base
= skb
->data
+ offset
;
3287 memset(&msg
, 0, sizeof(msg
));
3288 msg
.msg_flags
= MSG_DONTWAIT
;
3290 iov_iter_kvec(&msg
.msg_iter
, ITER_SOURCE
, &kv
, 1, slen
);
3291 ret
= INDIRECT_CALL_2(sendmsg
, sendmsg_locked
,
3292 sendmsg_unlocked
, sk
, &msg
);
3300 /* All the data was skb head? */
3304 /* Make offset relative to start of frags */
3305 offset
-= skb_headlen(skb
);
3307 /* Find where we are in frag list */
3308 for (fragidx
= 0; fragidx
< skb_shinfo(skb
)->nr_frags
; fragidx
++) {
3309 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[fragidx
];
3311 if (offset
< skb_frag_size(frag
))
3314 offset
-= skb_frag_size(frag
);
3317 for (; len
&& fragidx
< skb_shinfo(skb
)->nr_frags
; fragidx
++) {
3318 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[fragidx
];
3320 slen
= min_t(size_t, len
, skb_frag_size(frag
) - offset
);
3323 struct bio_vec bvec
;
3324 struct msghdr msg
= {
3325 .msg_flags
= MSG_SPLICE_PAGES
| MSG_DONTWAIT
,
3328 bvec_set_page(&bvec
, skb_frag_page(frag
), slen
,
3329 skb_frag_off(frag
) + offset
);
3330 iov_iter_bvec(&msg
.msg_iter
, ITER_SOURCE
, &bvec
, 1,
3333 ret
= INDIRECT_CALL_2(sendmsg
, sendmsg_locked
,
3334 sendmsg_unlocked
, sk
, &msg
);
3347 /* Process any frag lists */
3350 if (skb_has_frag_list(skb
)) {
3351 skb
= skb_shinfo(skb
)->frag_list
;
3354 } else if (skb
->next
) {
3361 return orig_len
- len
;
3364 return orig_len
== len
? ret
: orig_len
- len
;
3367 /* Send skb data on a socket. Socket must be locked. */
3368 int skb_send_sock_locked(struct sock
*sk
, struct sk_buff
*skb
, int offset
,
3371 return __skb_send_sock(sk
, skb
, offset
, len
, sendmsg_locked
);
3373 EXPORT_SYMBOL_GPL(skb_send_sock_locked
);
3375 /* Send skb data on a socket. Socket must be unlocked. */
3376 int skb_send_sock(struct sock
*sk
, struct sk_buff
*skb
, int offset
, int len
)
3378 return __skb_send_sock(sk
, skb
, offset
, len
, sendmsg_unlocked
);
3382 * skb_store_bits - store bits from kernel buffer to skb
3383 * @skb: destination buffer
3384 * @offset: offset in destination
3385 * @from: source buffer
3386 * @len: number of bytes to copy
3388 * Copy the specified number of bytes from the source buffer to the
3389 * destination skb. This function handles all the messy bits of
3390 * traversing fragment lists and such.
3393 int skb_store_bits(struct sk_buff
*skb
, int offset
, const void *from
, int len
)
3395 int start
= skb_headlen(skb
);
3396 struct sk_buff
*frag_iter
;
3399 if (offset
> (int)skb
->len
- len
)
3402 if ((copy
= start
- offset
) > 0) {
3405 skb_copy_to_linear_data_offset(skb
, offset
, from
, copy
);
3406 if ((len
-= copy
) == 0)
3412 if (!skb_frags_readable(skb
))
3415 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3416 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3419 WARN_ON(start
> offset
+ len
);
3421 end
= start
+ skb_frag_size(frag
);
3422 if ((copy
= end
- offset
) > 0) {
3423 u32 p_off
, p_len
, copied
;
3430 skb_frag_foreach_page(frag
,
3431 skb_frag_off(frag
) + offset
- start
,
3432 copy
, p
, p_off
, p_len
, copied
) {
3433 vaddr
= kmap_atomic(p
);
3434 memcpy(vaddr
+ p_off
, from
+ copied
, p_len
);
3435 kunmap_atomic(vaddr
);
3438 if ((len
-= copy
) == 0)
3446 skb_walk_frags(skb
, frag_iter
) {
3449 WARN_ON(start
> offset
+ len
);
3451 end
= start
+ frag_iter
->len
;
3452 if ((copy
= end
- offset
) > 0) {
3455 if (skb_store_bits(frag_iter
, offset
- start
,
3458 if ((len
-= copy
) == 0)
3471 EXPORT_SYMBOL(skb_store_bits
);
3473 /* Checksum skb data. */
3474 __wsum
__skb_checksum(const struct sk_buff
*skb
, int offset
, int len
,
3475 __wsum csum
, const struct skb_checksum_ops
*ops
)
3477 int start
= skb_headlen(skb
);
3478 int i
, copy
= start
- offset
;
3479 struct sk_buff
*frag_iter
;
3482 /* Checksum header. */
3486 csum
= INDIRECT_CALL_1(ops
->update
, csum_partial_ext
,
3487 skb
->data
+ offset
, copy
, csum
);
3488 if ((len
-= copy
) == 0)
3494 if (WARN_ON_ONCE(!skb_frags_readable(skb
)))
3497 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3499 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3501 WARN_ON(start
> offset
+ len
);
3503 end
= start
+ skb_frag_size(frag
);
3504 if ((copy
= end
- offset
) > 0) {
3505 u32 p_off
, p_len
, copied
;
3513 skb_frag_foreach_page(frag
,
3514 skb_frag_off(frag
) + offset
- start
,
3515 copy
, p
, p_off
, p_len
, copied
) {
3516 vaddr
= kmap_atomic(p
);
3517 csum2
= INDIRECT_CALL_1(ops
->update
,
3519 vaddr
+ p_off
, p_len
, 0);
3520 kunmap_atomic(vaddr
);
3521 csum
= INDIRECT_CALL_1(ops
->combine
,
3522 csum_block_add_ext
, csum
,
3534 skb_walk_frags(skb
, frag_iter
) {
3537 WARN_ON(start
> offset
+ len
);
3539 end
= start
+ frag_iter
->len
;
3540 if ((copy
= end
- offset
) > 0) {
3544 csum2
= __skb_checksum(frag_iter
, offset
- start
,
3546 csum
= INDIRECT_CALL_1(ops
->combine
, csum_block_add_ext
,
3547 csum
, csum2
, pos
, copy
);
3548 if ((len
-= copy
) == 0)
3559 EXPORT_SYMBOL(__skb_checksum
);
__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	const struct skb_checksum_ops ops = {
		.update  = csum_partial_ext,
		.combine = csum_block_add_ext,
	};

	return __skb_checksum(skb, offset, len, csum, &ops);
}
EXPORT_SYMBOL(skb_checksum);
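/* Illustrative usage sketch (not part of the original file): computing the
 * Internet checksum over everything from 'offset' to the end of the packet
 * and folding it to 16 bits.  csum_fold() comes from <net/checksum.h>, which
 * this file already includes.
 */
static __maybe_unused __sum16 skb_checksum_usage_example(const struct sk_buff *skb,
							 int offset)
{
	__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

	return csum_fold(csum);
}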
3573 /* Both of above in one bottle. */
3575 __wsum
skb_copy_and_csum_bits(const struct sk_buff
*skb
, int offset
,
3578 int start
= skb_headlen(skb
);
3579 int i
, copy
= start
- offset
;
3580 struct sk_buff
*frag_iter
;
3588 csum
= csum_partial_copy_nocheck(skb
->data
+ offset
, to
,
3590 if ((len
-= copy
) == 0)
3597 if (!skb_frags_readable(skb
))
3600 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3603 WARN_ON(start
> offset
+ len
);
3605 end
= start
+ skb_frag_size(&skb_shinfo(skb
)->frags
[i
]);
3606 if ((copy
= end
- offset
) > 0) {
3607 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3608 u32 p_off
, p_len
, copied
;
3616 skb_frag_foreach_page(frag
,
3617 skb_frag_off(frag
) + offset
- start
,
3618 copy
, p
, p_off
, p_len
, copied
) {
3619 vaddr
= kmap_atomic(p
);
3620 csum2
= csum_partial_copy_nocheck(vaddr
+ p_off
,
3623 kunmap_atomic(vaddr
);
3624 csum
= csum_block_add(csum
, csum2
, pos
);
3636 skb_walk_frags(skb
, frag_iter
) {
3640 WARN_ON(start
> offset
+ len
);
3642 end
= start
+ frag_iter
->len
;
3643 if ((copy
= end
- offset
) > 0) {
3646 csum2
= skb_copy_and_csum_bits(frag_iter
,
3649 csum
= csum_block_add(csum
, csum2
, pos
);
3650 if ((len
-= copy
) == 0)
3661 EXPORT_SYMBOL(skb_copy_and_csum_bits
);
3663 __sum16
__skb_checksum_complete_head(struct sk_buff
*skb
, int len
)
3667 sum
= csum_fold(skb_checksum(skb
, 0, len
, skb
->csum
));
3668 /* See comments in __skb_checksum_complete(). */
3670 if (unlikely(skb
->ip_summed
== CHECKSUM_COMPLETE
) &&
3671 !skb
->csum_complete_sw
)
3672 netdev_rx_csum_fault(skb
->dev
, skb
);
3674 if (!skb_shared(skb
))
3675 skb
->csum_valid
= !sum
;
3678 EXPORT_SYMBOL(__skb_checksum_complete_head
);
3680 /* This function assumes skb->csum already holds pseudo header's checksum,
3681 * which has been changed from the hardware checksum, for example, by
3682 * __skb_checksum_validate_complete(). And, the original skb->csum must
3683 * have been validated unsuccessfully for CHECKSUM_COMPLETE case.
3685 * It returns non-zero if the recomputed checksum is still invalid, otherwise
3686 * zero. The new checksum is stored back into skb->csum unless the skb is
3689 __sum16
__skb_checksum_complete(struct sk_buff
*skb
)
3694 csum
= skb_checksum(skb
, 0, skb
->len
, 0);
3696 sum
= csum_fold(csum_add(skb
->csum
, csum
));
3697 /* This check is inverted, because we already knew the hardware
3698 * checksum is invalid before calling this function. So, if the
3699 * re-computed checksum is valid instead, then we have a mismatch
3700 * between the original skb->csum and skb_checksum(). This means either
3701 * the original hardware checksum is incorrect or we screw up skb->csum
3702 * when moving skb->data around.
3705 if (unlikely(skb
->ip_summed
== CHECKSUM_COMPLETE
) &&
3706 !skb
->csum_complete_sw
)
3707 netdev_rx_csum_fault(skb
->dev
, skb
);
3710 if (!skb_shared(skb
)) {
3711 /* Save full packet checksum */
3713 skb
->ip_summed
= CHECKSUM_COMPLETE
;
3714 skb
->csum_complete_sw
= 1;
3715 skb
->csum_valid
= !sum
;
3720 EXPORT_SYMBOL(__skb_checksum_complete
);
3722 static __wsum
warn_crc32c_csum_update(const void *buff
, int len
, __wsum sum
)
3724 net_warn_ratelimited(
3725 "%s: attempt to compute crc32c without libcrc32c.ko\n",
3730 static __wsum
warn_crc32c_csum_combine(__wsum csum
, __wsum csum2
,
3731 int offset
, int len
)
3733 net_warn_ratelimited(
3734 "%s: attempt to compute crc32c without libcrc32c.ko\n",
3739 static const struct skb_checksum_ops default_crc32c_ops
= {
3740 .update
= warn_crc32c_csum_update
,
3741 .combine
= warn_crc32c_csum_combine
,
3744 const struct skb_checksum_ops
*crc32c_csum_stub __read_mostly
=
3745 &default_crc32c_ops
;
3746 EXPORT_SYMBOL(crc32c_csum_stub
);
3749 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
3750 * @from: source buffer
3752 * Calculates the amount of linear headroom needed in the 'to' skb passed
3753 * into skb_zerocopy().
3756 skb_zerocopy_headlen(const struct sk_buff
*from
)
3758 unsigned int hlen
= 0;
3760 if (!from
->head_frag
||
3761 skb_headlen(from
) < L1_CACHE_BYTES
||
3762 skb_shinfo(from
)->nr_frags
>= MAX_SKB_FRAGS
) {
3763 hlen
= skb_headlen(from
);
3768 if (skb_has_frag_list(from
))
3773 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen
);
3776 * skb_zerocopy - Zero copy skb to skb
3777 * @to: destination buffer
3778 * @from: source buffer
3779 * @len: number of bytes to copy from source buffer
3780 * @hlen: size of linear headroom in destination buffer
3782 * Copies up to `len` bytes from `from` to `to` by creating references
3783 * to the frags in the source buffer.
3785 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
3786 * headroom in the `to` buffer.
3789 * 0: everything is OK
3790 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
3791 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3794 skb_zerocopy(struct sk_buff
*to
, struct sk_buff
*from
, int len
, int hlen
)
3797 int plen
= 0; /* length of skb->head fragment */
3800 unsigned int offset
;
3802 BUG_ON(!from
->head_frag
&& !hlen
);
3804 /* dont bother with small payloads */
3805 if (len
<= skb_tailroom(to
))
3806 return skb_copy_bits(from
, 0, skb_put(to
, len
), len
);
3809 ret
= skb_copy_bits(from
, 0, skb_put(to
, hlen
), hlen
);
3814 plen
= min_t(int, skb_headlen(from
), len
);
3816 page
= virt_to_head_page(from
->head
);
3817 offset
= from
->data
- (unsigned char *)page_address(page
);
3818 __skb_fill_netmem_desc(to
, 0, page_to_netmem(page
),
3826 skb_len_add(to
, len
+ plen
);
3828 if (unlikely(skb_orphan_frags(from
, GFP_ATOMIC
))) {
3832 skb_zerocopy_clone(to
, from
, GFP_ATOMIC
);
3834 for (i
= 0; i
< skb_shinfo(from
)->nr_frags
; i
++) {
3839 skb_shinfo(to
)->frags
[j
] = skb_shinfo(from
)->frags
[i
];
3840 size
= min_t(int, skb_frag_size(&skb_shinfo(to
)->frags
[j
]),
3842 skb_frag_size_set(&skb_shinfo(to
)->frags
[j
], size
);
3844 skb_frag_ref(to
, j
);
3847 skb_shinfo(to
)->nr_frags
= j
;
3851 EXPORT_SYMBOL_GPL(skb_zerocopy
);
3853 void skb_copy_and_csum_dev(const struct sk_buff
*skb
, u8
*to
)
3858 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
3859 csstart
= skb_checksum_start_offset(skb
);
3861 csstart
= skb_headlen(skb
);
3863 BUG_ON(csstart
> skb_headlen(skb
));
3865 skb_copy_from_linear_data(skb
, to
, csstart
);
3868 if (csstart
!= skb
->len
)
3869 csum
= skb_copy_and_csum_bits(skb
, csstart
, to
+ csstart
,
3870 skb
->len
- csstart
);
3872 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
3873 long csstuff
= csstart
+ skb
->csum_offset
;
3875 *((__sum16
*)(to
+ csstuff
)) = csum_fold(csum
);
3878 EXPORT_SYMBOL(skb_copy_and_csum_dev
);
/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);
/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);
/**
 *	skb_queue_purge_reason - empty a list
 *	@list: list to empty
 *	@reason: drop reason
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge_reason(struct sk_buff_head *list,
			    enum skb_drop_reason reason)
{
	struct sk_buff_head tmp;
	unsigned long flags;

	if (skb_queue_empty_lockless(list))
		return;

	__skb_queue_head_init(&tmp);

	spin_lock_irqsave(&list->lock, flags);
	skb_queue_splice_init(list, &tmp);
	spin_unlock_irqrestore(&list->lock, flags);

	__skb_queue_purge_reason(&tmp, reason);
}
EXPORT_SYMBOL(skb_queue_purge_reason);
3950 * skb_rbtree_purge - empty a skb rbtree
3951 * @root: root of the rbtree to empty
3952 * Return value: the sum of truesizes of all purged skbs.
3954 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
3955 * the list and one reference dropped. This function does not take
3956 * any lock. Synchronization should be handled by the caller (e.g., TCP
3957 * out-of-order queue is protected by the socket lock).
3959 unsigned int skb_rbtree_purge(struct rb_root
*root
)
3961 struct rb_node
*p
= rb_first(root
);
3962 unsigned int sum
= 0;
3965 struct sk_buff
*skb
= rb_entry(p
, struct sk_buff
, rbnode
);
3968 rb_erase(&skb
->rbnode
, root
);
3969 sum
+= skb
->truesize
;
3975 void skb_errqueue_purge(struct sk_buff_head
*list
)
3977 struct sk_buff
*skb
, *next
;
3978 struct sk_buff_head kill
;
3979 unsigned long flags
;
3981 __skb_queue_head_init(&kill
);
3983 spin_lock_irqsave(&list
->lock
, flags
);
3984 skb_queue_walk_safe(list
, skb
, next
) {
3985 if (SKB_EXT_ERR(skb
)->ee
.ee_origin
== SO_EE_ORIGIN_ZEROCOPY
||
3986 SKB_EXT_ERR(skb
)->ee
.ee_origin
== SO_EE_ORIGIN_TIMESTAMPING
)
3988 __skb_unlink(skb
, list
);
3989 __skb_queue_tail(&kill
, skb
);
3991 spin_unlock_irqrestore(&list
->lock
, flags
);
3992 __skb_queue_purge(&kill
);
3994 EXPORT_SYMBOL(skb_errqueue_purge
);
/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions
 *	safely.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);
/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions
 *	safely.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);
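/* Illustrative usage sketch (not part of the original file): the locked queue
 * helpers pair naturally as a producer/consumer FIFO.  skb_queue_tail() and
 * skb_dequeue() take the queue lock internally, so no extra locking is needed
 * around individual operations; the queue is assumed to have been set up with
 * skb_queue_head_init().
 */
static __maybe_unused void skb_queue_usage_example(struct sk_buff_head *queue,
						   struct sk_buff *skb)
{
	struct sk_buff *next;

	skb_queue_tail(queue, skb);		/* producer side */

	next = skb_dequeue(queue);		/* consumer side */
	if (next)
		kfree_skb(next);
}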
/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);
/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);
4078 static inline void skb_split_inside_header(struct sk_buff
*skb
,
4079 struct sk_buff
* skb1
,
4080 const u32 len
, const int pos
)
4084 skb_copy_from_linear_data_offset(skb
, len
, skb_put(skb1
, pos
- len
),
4086 /* And move data appendix as is. */
4087 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++)
4088 skb_shinfo(skb1
)->frags
[i
] = skb_shinfo(skb
)->frags
[i
];
4090 skb_shinfo(skb1
)->nr_frags
= skb_shinfo(skb
)->nr_frags
;
4091 skb1
->unreadable
= skb
->unreadable
;
4092 skb_shinfo(skb
)->nr_frags
= 0;
4093 skb1
->data_len
= skb
->data_len
;
4094 skb1
->len
+= skb1
->data_len
;
4097 skb_set_tail_pointer(skb
, len
);
4100 static inline void skb_split_no_header(struct sk_buff
*skb
,
4101 struct sk_buff
* skb1
,
4102 const u32 len
, int pos
)
4105 const int nfrags
= skb_shinfo(skb
)->nr_frags
;
4107 skb_shinfo(skb
)->nr_frags
= 0;
4108 skb1
->len
= skb1
->data_len
= skb
->len
- len
;
4110 skb
->data_len
= len
- pos
;
4112 for (i
= 0; i
< nfrags
; i
++) {
4113 int size
= skb_frag_size(&skb_shinfo(skb
)->frags
[i
]);
4115 if (pos
+ size
> len
) {
4116 skb_shinfo(skb1
)->frags
[k
] = skb_shinfo(skb
)->frags
[i
];
4120 * We have two variants in this case:
4121 * 1. Move all the frag to the second
4122 * part, if it is possible. F.e.
4123 * this approach is mandatory for TUX,
4124 * where splitting is expensive.
4125 * 2. Split is accurately. We make this.
4127 skb_frag_ref(skb
, i
);
4128 skb_frag_off_add(&skb_shinfo(skb1
)->frags
[0], len
- pos
);
4129 skb_frag_size_sub(&skb_shinfo(skb1
)->frags
[0], len
- pos
);
4130 skb_frag_size_set(&skb_shinfo(skb
)->frags
[i
], len
- pos
);
4131 skb_shinfo(skb
)->nr_frags
++;
4135 skb_shinfo(skb
)->nr_frags
++;
4138 skb_shinfo(skb1
)->nr_frags
= k
;
4140 skb1
->unreadable
= skb
->unreadable
;
/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);
	const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY;

	skb_zcopy_downgrade_managed(skb);

	skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
	skb_zerocopy_clone(skb1, skb, 0);
	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
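/* Illustrative usage sketch (not part of the original file): skb_split()
 * moves everything past 'len' bytes from @skb into a second, freshly
 * allocated skb, much like TCP does when cutting a segment at an MSS
 * boundary.  This sketch assumes @skb is private (not shared) and sizes the
 * second skb pessimistically at skb->len - len so the inside-header case has
 * enough linear room.
 */
static __maybe_unused struct sk_buff *skb_split_usage_example(struct sk_buff *skb,
							      u32 len)
{
	struct sk_buff *rest;

	if (len >= skb->len)
		return NULL;
	rest = alloc_skb(skb->len - len, GFP_ATOMIC);
	if (!rest)
		return NULL;
	skb_split(skb, rest, len);	/* skb keeps [0, len), rest gets the tail */
	return rest;
}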
/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
}
4175 * skb_shift - Shifts paged data partially from skb to another
4176 * @tgt: buffer into which tail data gets added
4177 * @skb: buffer from which the paged data comes from
4178 * @shiftlen: shift up to this many bytes
4180 * Attempts to shift up to shiftlen worth of bytes, which may be less than
4181 * the length of the skb, from skb to tgt. Returns number bytes shifted.
4182 * It's up to caller to free skb if everything was shifted.
4184 * If @tgt runs out of frags, the whole operation is aborted.
4186 * Skb cannot include anything else but paged data while tgt is allowed
4187 * to have non-paged data as well.
4189 * TODO: full sized shift could be optimized but that would need
4190 * specialized skb free'er to handle frags without up-to-date nr_frags.
4192 int skb_shift(struct sk_buff
*tgt
, struct sk_buff
*skb
, int shiftlen
)
4194 int from
, to
, merge
, todo
;
4195 skb_frag_t
*fragfrom
, *fragto
;
4197 BUG_ON(shiftlen
> skb
->len
);
4199 if (skb_headlen(skb
))
4201 if (skb_zcopy(tgt
) || skb_zcopy(skb
))
4204 DEBUG_NET_WARN_ON_ONCE(tgt
->pp_recycle
!= skb
->pp_recycle
);
4205 DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt
, skb
));
4209 to
= skb_shinfo(tgt
)->nr_frags
;
4210 fragfrom
= &skb_shinfo(skb
)->frags
[from
];
4212 /* Actual merge is delayed until the point when we know we can
4213 * commit all, so that we don't have to undo partial changes
4215 if (!skb_can_coalesce(tgt
, to
, skb_frag_page(fragfrom
),
4216 skb_frag_off(fragfrom
))) {
4221 todo
-= skb_frag_size(fragfrom
);
4223 if (skb_prepare_for_shift(skb
) ||
4224 skb_prepare_for_shift(tgt
))
4227 /* All previous frag pointers might be stale! */
4228 fragfrom
= &skb_shinfo(skb
)->frags
[from
];
4229 fragto
= &skb_shinfo(tgt
)->frags
[merge
];
4231 skb_frag_size_add(fragto
, shiftlen
);
4232 skb_frag_size_sub(fragfrom
, shiftlen
);
4233 skb_frag_off_add(fragfrom
, shiftlen
);
4241 /* Skip full, not-fitting skb to avoid expensive operations */
4242 if ((shiftlen
== skb
->len
) &&
4243 (skb_shinfo(skb
)->nr_frags
- from
) > (MAX_SKB_FRAGS
- to
))
4246 if (skb_prepare_for_shift(skb
) || skb_prepare_for_shift(tgt
))
4249 while ((todo
> 0) && (from
< skb_shinfo(skb
)->nr_frags
)) {
4250 if (to
== MAX_SKB_FRAGS
)
4253 fragfrom
= &skb_shinfo(skb
)->frags
[from
];
4254 fragto
= &skb_shinfo(tgt
)->frags
[to
];
4256 if (todo
>= skb_frag_size(fragfrom
)) {
4257 *fragto
= *fragfrom
;
4258 todo
-= skb_frag_size(fragfrom
);
4263 __skb_frag_ref(fragfrom
);
4264 skb_frag_page_copy(fragto
, fragfrom
);
4265 skb_frag_off_copy(fragto
, fragfrom
);
4266 skb_frag_size_set(fragto
, todo
);
4268 skb_frag_off_add(fragfrom
, todo
);
4269 skb_frag_size_sub(fragfrom
, todo
);
4277 /* Ready to "commit" this state change to tgt */
4278 skb_shinfo(tgt
)->nr_frags
= to
;
4281 fragfrom
= &skb_shinfo(skb
)->frags
[0];
4282 fragto
= &skb_shinfo(tgt
)->frags
[merge
];
4284 skb_frag_size_add(fragto
, skb_frag_size(fragfrom
));
4285 __skb_frag_unref(fragfrom
, skb
->pp_recycle
);
4288 /* Reposition in the original skb */
4290 while (from
< skb_shinfo(skb
)->nr_frags
)
4291 skb_shinfo(skb
)->frags
[to
++] = skb_shinfo(skb
)->frags
[from
++];
4292 skb_shinfo(skb
)->nr_frags
= to
;
4294 BUG_ON(todo
> 0 && !skb_shinfo(skb
)->nr_frags
);
4297 /* Most likely the tgt won't ever need its checksum anymore, skb on
4298 * the other hand might need it if it needs to be resent
4300 tgt
->ip_summed
= CHECKSUM_PARTIAL
;
4301 skb
->ip_summed
= CHECKSUM_PARTIAL
;
4303 skb_len_add(skb
, -shiftlen
);
4304 skb_len_add(tgt
, shiftlen
);
/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
	st->frag_off = 0;
}
EXPORT_SYMBOL(skb_prepare_seq_read);
4332 * skb_seq_read - Sequentially read skb data
4333 * @consumed: number of bytes consumed by the caller so far
4334 * @data: destination pointer for data to be returned
4335 * @st: state variable
4337 * Reads a block of skb data at @consumed relative to the
4338 * lower offset specified to skb_prepare_seq_read(). Assigns
4339 * the head of the data block to @data and returns the length
4340 * of the block or 0 if the end of the skb data or the upper
4341 * offset has been reached.
4343 * The caller is not required to consume all of the data
4344 * returned, i.e. @consumed is typically set to the number
4345 * of bytes already consumed and the next call to
4346 * skb_seq_read() will return the remaining part of the block.
4348 * Note 1: The size of each block of data returned can be arbitrary,
4349 * this limitation is the cost for zerocopy sequential
4350 * reads of potentially non linear data.
4352 * Note 2: Fragment lists within fragments are not implemented
4353 * at the moment, state->root_skb could be replaced with
4354 * a stack for this purpose.
4356 unsigned int skb_seq_read(unsigned int consumed
, const u8
**data
,
4357 struct skb_seq_state
*st
)
4359 unsigned int block_limit
, abs_offset
= consumed
+ st
->lower_offset
;
4362 if (unlikely(abs_offset
>= st
->upper_offset
)) {
4363 if (st
->frag_data
) {
4364 kunmap_atomic(st
->frag_data
);
4365 st
->frag_data
= NULL
;
4371 block_limit
= skb_headlen(st
->cur_skb
) + st
->stepped_offset
;
4373 if (abs_offset
< block_limit
&& !st
->frag_data
) {
4374 *data
= st
->cur_skb
->data
+ (abs_offset
- st
->stepped_offset
);
4375 return block_limit
- abs_offset
;
4378 if (!skb_frags_readable(st
->cur_skb
))
4381 if (st
->frag_idx
== 0 && !st
->frag_data
)
4382 st
->stepped_offset
+= skb_headlen(st
->cur_skb
);
4384 while (st
->frag_idx
< skb_shinfo(st
->cur_skb
)->nr_frags
) {
4385 unsigned int pg_idx
, pg_off
, pg_sz
;
4387 frag
= &skb_shinfo(st
->cur_skb
)->frags
[st
->frag_idx
];
4390 pg_off
= skb_frag_off(frag
);
4391 pg_sz
= skb_frag_size(frag
);
4393 if (skb_frag_must_loop(skb_frag_page(frag
))) {
4394 pg_idx
= (pg_off
+ st
->frag_off
) >> PAGE_SHIFT
;
4395 pg_off
= offset_in_page(pg_off
+ st
->frag_off
);
4396 pg_sz
= min_t(unsigned int, pg_sz
- st
->frag_off
,
4397 PAGE_SIZE
- pg_off
);
4400 block_limit
= pg_sz
+ st
->stepped_offset
;
4401 if (abs_offset
< block_limit
) {
4403 st
->frag_data
= kmap_atomic(skb_frag_page(frag
) + pg_idx
);
4405 *data
= (u8
*)st
->frag_data
+ pg_off
+
4406 (abs_offset
- st
->stepped_offset
);
4408 return block_limit
- abs_offset
;
4411 if (st
->frag_data
) {
4412 kunmap_atomic(st
->frag_data
);
4413 st
->frag_data
= NULL
;
4416 st
->stepped_offset
+= pg_sz
;
4417 st
->frag_off
+= pg_sz
;
4418 if (st
->frag_off
== skb_frag_size(frag
)) {
4424 if (st
->frag_data
) {
4425 kunmap_atomic(st
->frag_data
);
4426 st
->frag_data
= NULL
;
4429 if (st
->root_skb
== st
->cur_skb
&& skb_has_frag_list(st
->root_skb
)) {
4430 st
->cur_skb
= skb_shinfo(st
->root_skb
)->frag_list
;
4433 } else if (st
->cur_skb
->next
) {
4434 st
->cur_skb
= st
->cur_skb
->next
;
4441 EXPORT_SYMBOL(skb_seq_read
);
/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if skb_seq_read() was not called until it
 * returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_atomic(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);
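/* Illustrative usage sketch (not part of the original file): walking all
 * bytes of a possibly non-linear skb with the sequential-read API.  Each
 * skb_seq_read() call returns a mapped block; the walk ends cleanly once it
 * returns 0, while an early break would require skb_abort_seq_read() to drop
 * the last mapping.
 */
static __maybe_unused unsigned int skb_seq_read_usage_example(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len, count = 0;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		count += len;		/* real callers would scan data[0..len) */
		consumed += len;
	}
	/* skb_seq_read() returned 0, so no skb_abort_seq_read(&st) is needed. */
	return count;
}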
4458 * skb_copy_seq_read() - copy from a skb_seq_state to a buffer
4459 * @st: source skb_seq_state
4460 * @offset: offset in source
4461 * @to: destination buffer
4462 * @len: number of bytes to copy
4464 * Copy @len bytes from @offset bytes into the source @st to the destination
4465 * buffer @to. `offset` should increase (or be unchanged) with each subsequent
4466 * call to this function. If offset needs to decrease from the previous use `st`
4467 * should be reset first.
4469 * Return: 0 on success or -EINVAL if the copy ended early
4471 int skb_copy_seq_read(struct skb_seq_state
*st
, int offset
, void *to
, int len
)
4477 sqlen
= skb_seq_read(offset
, &data
, st
);
4481 memcpy(to
, data
, len
);
4484 memcpy(to
, data
, sqlen
);
4490 EXPORT_SYMBOL(skb_copy_seq_read
);
#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}
/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config)
{
	unsigned int patlen = config->ops->get_pattern_len(config);
	struct ts_state state;
	unsigned int ret;

	BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));

	ret = textsearch_find(config, &state);
	return (ret + patlen <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
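/* Illustrative usage sketch (not part of the original file): searching an skb
 * for a fixed byte pattern.  textsearch_prepare()/textsearch_destroy() come
 * from <linux/textsearch.h>, which this file already includes; "kmp" selects
 * the Knuth-Morris-Pratt matcher and TS_AUTOLOAD allows it to be loaded on
 * demand.  The pattern is an arbitrary example.
 */
static __maybe_unused unsigned int skb_find_text_usage_example(struct sk_buff *skb)
{
	static const char pattern[] = "HTTP";
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, sizeof(pattern) - 1,
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf);	/* UINT_MAX if absent */
	textsearch_destroy(conf);
	return pos;
}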
4537 int skb_append_pagefrags(struct sk_buff
*skb
, struct page
*page
,
4538 int offset
, size_t size
, size_t max_frags
)
4540 int i
= skb_shinfo(skb
)->nr_frags
;
4542 if (skb_can_coalesce(skb
, i
, page
, offset
)) {
4543 skb_frag_size_add(&skb_shinfo(skb
)->frags
[i
- 1], size
);
4544 } else if (i
< max_frags
) {
4545 skb_zcopy_downgrade_managed(skb
);
4547 skb_fill_page_desc_noacc(skb
, i
, page
, offset
, size
);
4554 EXPORT_SYMBOL_GPL(skb_append_pagefrags
);
/**
 *	skb_pull_rcsum - pull skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_pull on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_pull unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	unsigned char *data = skb->data;

	BUG_ON(len > skb->len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, data, len);
	return skb->data;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
4578 static inline skb_frag_t
skb_head_frag_to_page_desc(struct sk_buff
*frag_skb
)
4580 skb_frag_t head_frag
;
4583 page
= virt_to_head_page(frag_skb
->head
);
4584 skb_frag_fill_page_desc(&head_frag
, page
, frag_skb
->data
-
4585 (unsigned char *)page_address(page
),
4586 skb_headlen(frag_skb
));
4590 struct sk_buff
*skb_segment_list(struct sk_buff
*skb
,
4591 netdev_features_t features
,
4592 unsigned int offset
)
4594 struct sk_buff
*list_skb
= skb_shinfo(skb
)->frag_list
;
4595 unsigned int tnl_hlen
= skb_tnl_header_len(skb
);
4596 unsigned int delta_truesize
= 0;
4597 unsigned int delta_len
= 0;
4598 struct sk_buff
*tail
= NULL
;
4599 struct sk_buff
*nskb
, *tmp
;
4602 skb_push(skb
, -skb_network_offset(skb
) + offset
);
4604 /* Ensure the head is writeable before touching the shared info */
4605 err
= skb_unclone(skb
, GFP_ATOMIC
);
4609 skb_shinfo(skb
)->frag_list
= NULL
;
4613 list_skb
= list_skb
->next
;
4616 delta_truesize
+= nskb
->truesize
;
4617 if (skb_shared(nskb
)) {
4618 tmp
= skb_clone(nskb
, GFP_ATOMIC
);
4622 err
= skb_unclone(nskb
, GFP_ATOMIC
);
4633 if (unlikely(err
)) {
4634 nskb
->next
= list_skb
;
4640 delta_len
+= nskb
->len
;
4642 skb_push(nskb
, -skb_network_offset(nskb
) + offset
);
4644 skb_release_head_state(nskb
);
4645 len_diff
= skb_network_header_len(nskb
) - skb_network_header_len(skb
);
4646 __copy_skb_header(nskb
, skb
);
4648 skb_headers_offset_update(nskb
, skb_headroom(nskb
) - skb_headroom(skb
));
4649 nskb
->transport_header
+= len_diff
;
4650 skb_copy_from_linear_data_offset(skb
, -tnl_hlen
,
4651 nskb
->data
- tnl_hlen
,
4654 if (skb_needs_linearize(nskb
, features
) &&
4655 __skb_linearize(nskb
))
4659 skb
->truesize
= skb
->truesize
- delta_truesize
;
4660 skb
->data_len
= skb
->data_len
- delta_len
;
4661 skb
->len
= skb
->len
- delta_len
;
4667 if (skb_needs_linearize(skb
, features
) &&
4668 __skb_linearize(skb
))
4676 kfree_skb_list(skb
->next
);
4678 return ERR_PTR(-ENOMEM
);
4680 EXPORT_SYMBOL_GPL(skb_segment_list
);
4683 * skb_segment - Perform protocol segmentation on skb.
4684 * @head_skb: buffer to segment
4685 * @features: features for the output path (see dev->features)
4687 * This function performs segmentation on the given skb. It returns
4688 * a pointer to the first in a list of new skbs for the segments.
4689 * In case of error it returns ERR_PTR(err).
4691 struct sk_buff
*skb_segment(struct sk_buff
*head_skb
,
4692 netdev_features_t features
)
4694 struct sk_buff
*segs
= NULL
;
4695 struct sk_buff
*tail
= NULL
;
4696 struct sk_buff
*list_skb
= skb_shinfo(head_skb
)->frag_list
;
4697 unsigned int mss
= skb_shinfo(head_skb
)->gso_size
;
4698 unsigned int doffset
= head_skb
->data
- skb_mac_header(head_skb
);
4699 unsigned int offset
= doffset
;
4700 unsigned int tnl_hlen
= skb_tnl_header_len(head_skb
);
4701 unsigned int partial_segs
= 0;
4702 unsigned int headroom
;
4703 unsigned int len
= head_skb
->len
;
4704 struct sk_buff
*frag_skb
;
4712 if ((skb_shinfo(head_skb
)->gso_type
& SKB_GSO_DODGY
) &&
4713 mss
!= GSO_BY_FRAGS
&& mss
!= skb_headlen(head_skb
)) {
4714 struct sk_buff
*check_skb
;
4716 for (check_skb
= list_skb
; check_skb
; check_skb
= check_skb
->next
) {
4717 if (skb_headlen(check_skb
) && !check_skb
->head_frag
) {
4718 /* gso_size is untrusted, and we have a frag_list with
4719 * a linear non head_frag item.
4721 * If head_skb's headlen does not fit requested gso_size,
4722 * it means that the frag_list members do NOT terminate
4723 * on exact gso_size boundaries. Hence we cannot perform
4724 * skb_frag_t page sharing. Therefore we must fallback to
4725 * copying the frag_list skbs; we do so by disabling SG.
4727 features
&= ~NETIF_F_SG
;
4733 __skb_push(head_skb
, doffset
);
4734 proto
= skb_network_protocol(head_skb
, NULL
);
4735 if (unlikely(!proto
))
4736 return ERR_PTR(-EINVAL
);
4738 sg
= !!(features
& NETIF_F_SG
);
4739 csum
= !!can_checksum_protocol(features
, proto
);
4741 if (sg
&& csum
&& (mss
!= GSO_BY_FRAGS
)) {
4742 if (!(features
& NETIF_F_GSO_PARTIAL
)) {
4743 struct sk_buff
*iter
;
4744 unsigned int frag_len
;
4747 !net_gso_ok(features
, skb_shinfo(head_skb
)->gso_type
))
4750 /* If we get here then all the required
4751 * GSO features except frag_list are supported.
4752 * Try to split the SKB to multiple GSO SKBs
4753 * with no frag_list.
4754 * Currently we can do that only when the buffers don't
4755 * have a linear part and all the buffers except
4756 * the last are of the same length.
4758 frag_len
= list_skb
->len
;
4759 skb_walk_frags(head_skb
, iter
) {
4760 if (frag_len
!= iter
->len
&& iter
->next
)
4762 if (skb_headlen(iter
) && !iter
->head_frag
)
4768 if (len
!= frag_len
)
4772 /* GSO partial only requires that we trim off any excess that
4773 * doesn't fit into an MSS sized block, so take care of that
4775 * Cap len to not accidentally hit GSO_BY_FRAGS.
4777 partial_segs
= min(len
, GSO_BY_FRAGS
- 1) / mss
;
4778 if (partial_segs
> 1)
4779 mss
*= partial_segs
;
4785 headroom
= skb_headroom(head_skb
);
4786 pos
= skb_headlen(head_skb
);
4788 if (skb_orphan_frags(head_skb
, GFP_ATOMIC
))
4789 return ERR_PTR(-ENOMEM
);
4791 nfrags
= skb_shinfo(head_skb
)->nr_frags
;
4792 frag
= skb_shinfo(head_skb
)->frags
;
4793 frag_skb
= head_skb
;
4796 struct sk_buff
*nskb
;
4797 skb_frag_t
*nskb_frag
;
4801 if (unlikely(mss
== GSO_BY_FRAGS
)) {
4802 len
= list_skb
->len
;
4804 len
= head_skb
->len
- offset
;
4809 hsize
= skb_headlen(head_skb
) - offset
;
4811 if (hsize
<= 0 && i
>= nfrags
&& skb_headlen(list_skb
) &&
4812 (skb_headlen(list_skb
) == len
|| sg
)) {
4813 BUG_ON(skb_headlen(list_skb
) > len
);
4815 nskb
= skb_clone(list_skb
, GFP_ATOMIC
);
4816 if (unlikely(!nskb
))
4820 nfrags
= skb_shinfo(list_skb
)->nr_frags
;
4821 frag
= skb_shinfo(list_skb
)->frags
;
4822 frag_skb
= list_skb
;
4823 pos
+= skb_headlen(list_skb
);
4825 while (pos
< offset
+ len
) {
4826 BUG_ON(i
>= nfrags
);
4828 size
= skb_frag_size(frag
);
4829 if (pos
+ size
> offset
+ len
)
4837 list_skb
= list_skb
->next
;
4839 if (unlikely(pskb_trim(nskb
, len
))) {
4844 hsize
= skb_end_offset(nskb
);
4845 if (skb_cow_head(nskb
, doffset
+ headroom
)) {
4850 nskb
->truesize
+= skb_end_offset(nskb
) - hsize
;
4851 skb_release_head_state(nskb
);
4852 __skb_push(nskb
, doffset
);
4856 if (hsize
> len
|| !sg
)
4859 nskb
= __alloc_skb(hsize
+ doffset
+ headroom
,
4860 GFP_ATOMIC
, skb_alloc_rx_flag(head_skb
),
4863 if (unlikely(!nskb
))
4866 skb_reserve(nskb
, headroom
);
4867 __skb_put(nskb
, doffset
);
4876 __copy_skb_header(nskb
, head_skb
);
4878 skb_headers_offset_update(nskb
, skb_headroom(nskb
) - headroom
);
4879 skb_reset_mac_len(nskb
);
4881 skb_copy_from_linear_data_offset(head_skb
, -tnl_hlen
,
4882 nskb
->data
- tnl_hlen
,
4883 doffset
+ tnl_hlen
);
4885 if (nskb
->len
== len
+ doffset
)
4886 goto perform_csum_check
;
4890 if (!nskb
->remcsum_offload
)
4891 nskb
->ip_summed
= CHECKSUM_NONE
;
4892 SKB_GSO_CB(nskb
)->csum
=
4893 skb_copy_and_csum_bits(head_skb
, offset
,
4897 SKB_GSO_CB(nskb
)->csum_start
=
4898 skb_headroom(nskb
) + doffset
;
4900 if (skb_copy_bits(head_skb
, offset
, skb_put(nskb
, len
), len
))
4906 nskb_frag
= skb_shinfo(nskb
)->frags
;
4908 skb_copy_from_linear_data_offset(head_skb
, offset
,
4909 skb_put(nskb
, hsize
), hsize
);
4911 skb_shinfo(nskb
)->flags
|= skb_shinfo(head_skb
)->flags
&
4914 if (skb_zerocopy_clone(nskb
, frag_skb
, GFP_ATOMIC
))
4917 while (pos
< offset
+ len
) {
4919 if (skb_orphan_frags(list_skb
, GFP_ATOMIC
) ||
4920 skb_zerocopy_clone(nskb
, list_skb
,
4925 nfrags
= skb_shinfo(list_skb
)->nr_frags
;
4926 frag
= skb_shinfo(list_skb
)->frags
;
4927 frag_skb
= list_skb
;
4928 if (!skb_headlen(list_skb
)) {
4931 BUG_ON(!list_skb
->head_frag
);
4933 /* to make room for head_frag. */
4938 list_skb
= list_skb
->next
;
4941 if (unlikely(skb_shinfo(nskb
)->nr_frags
>=
4943 net_warn_ratelimited(
4944 "skb_segment: too many frags: %u %u\n",
4950 *nskb_frag
= (i
< 0) ? skb_head_frag_to_page_desc(frag_skb
) : *frag
;
4951 __skb_frag_ref(nskb_frag
);
4952 size
= skb_frag_size(nskb_frag
);
4955 skb_frag_off_add(nskb_frag
, offset
- pos
);
4956 skb_frag_size_sub(nskb_frag
, offset
- pos
);
4959 skb_shinfo(nskb
)->nr_frags
++;
4961 if (pos
+ size
<= offset
+ len
) {
4966 skb_frag_size_sub(nskb_frag
, pos
+ size
- (offset
+ len
));
4974 nskb
->data_len
= len
- hsize
;
4975 nskb
->len
+= nskb
->data_len
;
4976 nskb
->truesize
+= nskb
->data_len
;
4980 if (skb_has_shared_frag(nskb
) &&
4981 __skb_linearize(nskb
))
4984 if (!nskb
->remcsum_offload
)
4985 nskb
->ip_summed
= CHECKSUM_NONE
;
4986 SKB_GSO_CB(nskb
)->csum
=
4987 skb_checksum(nskb
, doffset
,
4988 nskb
->len
- doffset
, 0);
4989 SKB_GSO_CB(nskb
)->csum_start
=
4990 skb_headroom(nskb
) + doffset
;
4992 } while ((offset
+= len
) < head_skb
->len
);
4994 /* Some callers want to get the end of the list.
4995 * Put it in segs->prev to avoid walking the list.
4996 * (see validate_xmit_skb_list() for example)
5001 struct sk_buff
*iter
;
5002 int type
= skb_shinfo(head_skb
)->gso_type
;
5003 unsigned short gso_size
= skb_shinfo(head_skb
)->gso_size
;
5005 /* Update type to add partial and then remove dodgy if set */
5006 type
|= (features
& NETIF_F_GSO_PARTIAL
) / NETIF_F_GSO_PARTIAL
* SKB_GSO_PARTIAL
;
5007 type
&= ~SKB_GSO_DODGY
;
5009 /* Update GSO info and prepare to start updating headers on
5010 * our way back down the stack of protocols.
5012 for (iter
= segs
; iter
; iter
= iter
->next
) {
5013 skb_shinfo(iter
)->gso_size
= gso_size
;
5014 skb_shinfo(iter
)->gso_segs
= partial_segs
;
5015 skb_shinfo(iter
)->gso_type
= type
;
5016 SKB_GSO_CB(iter
)->data_offset
= skb_headroom(iter
) + doffset
;
5019 if (tail
->len
- doffset
<= gso_size
)
5020 skb_shinfo(tail
)->gso_size
= 0;
5021 else if (tail
!= segs
)
5022 skb_shinfo(tail
)->gso_segs
= DIV_ROUND_UP(tail
->len
- doffset
, gso_size
);
5025 /* Following permits correct backpressure, for protocols
5026 * using skb_set_owner_w().
5027 * Idea is to tranfert ownership from head_skb to last segment.
5029 if (head_skb
->destructor
== sock_wfree
) {
5030 swap(tail
->truesize
, head_skb
->truesize
);
5031 swap(tail
->destructor
, head_skb
->destructor
);
5032 swap(tail
->sk
, head_skb
->sk
);
5037 kfree_skb_list(segs
);
5038 return ERR_PTR(err
);
5040 EXPORT_SYMBOL_GPL(skb_segment
);
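
/* Illustrative sketch (not part of the original file): the calling convention
 * for skb_segment() as used from GSO offload callbacks.  example_gso_segment()
 * is hypothetical; the authoritative callers are the per-protocol
 * gso_segment() implementations.
 */
static __maybe_unused struct sk_buff *
example_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return segs;	/* propagate ERR_PTR(err) to the caller */

	/* On success 'segs' is a ->next linked list of fully formed segments
	 * (tail cached in segs->prev); the caller is expected to fix up the
	 * headers of each segment and free the original skb.
	 */
	return segs;
}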
5042 #ifdef CONFIG_SKB_EXTENSIONS
5043 #define SKB_EXT_ALIGN_VALUE 8
5044 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
5046 static const u8 skb_ext_type_len
[] = {
5047 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
5048 [SKB_EXT_BRIDGE_NF
] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info
),
5051 [SKB_EXT_SEC_PATH
] = SKB_EXT_CHUNKSIZEOF(struct sec_path
),
5053 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
5054 [TC_SKB_EXT
] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext
),
5056 #if IS_ENABLED(CONFIG_MPTCP)
5057 [SKB_EXT_MPTCP
] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext
),
5059 #if IS_ENABLED(CONFIG_MCTP_FLOWS)
5060 [SKB_EXT_MCTP
] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow
),
5064 static __always_inline
unsigned int skb_ext_total_length(void)
5066 unsigned int l
= SKB_EXT_CHUNKSIZEOF(struct skb_ext
);
5069 for (i
= 0; i
< ARRAY_SIZE(skb_ext_type_len
); i
++)
5070 l
+= skb_ext_type_len
[i
];
5075 static void skb_extensions_init(void)
5077 BUILD_BUG_ON(SKB_EXT_NUM
>= 8);
5078 #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL)
5079 BUILD_BUG_ON(skb_ext_total_length() > 255);
5082 skbuff_ext_cache
= kmem_cache_create("skbuff_ext_cache",
5083 SKB_EXT_ALIGN_VALUE
* skb_ext_total_length(),
5085 SLAB_HWCACHE_ALIGN
|SLAB_PANIC
,
5089 static void skb_extensions_init(void) {}
5092 /* The SKB kmem_cache slab is critical for network performance. Never
5093 * merge/alias the slab with similar sized objects. This avoids fragmentation
5094 * that hurts performance of kmem_cache_{alloc,free}_bulk APIs.
5096 #ifndef CONFIG_SLUB_TINY
5097 #define FLAG_SKB_NO_MERGE SLAB_NO_MERGE
5098 #else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */
5099 #define FLAG_SKB_NO_MERGE 0
5102 void __init
skb_init(void)
5104 net_hotdata
.skbuff_cache
= kmem_cache_create_usercopy("skbuff_head_cache",
5105 sizeof(struct sk_buff
),
5107 SLAB_HWCACHE_ALIGN
|SLAB_PANIC
|
5109 offsetof(struct sk_buff
, cb
),
5110 sizeof_field(struct sk_buff
, cb
),
5112 net_hotdata
.skbuff_fclone_cache
= kmem_cache_create("skbuff_fclone_cache",
5113 sizeof(struct sk_buff_fclones
),
5115 SLAB_HWCACHE_ALIGN
|SLAB_PANIC
,
5117 /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes.
5118 * struct skb_shared_info is located at the end of skb->head,
5119 * and should not be copied to/from user.
5121 net_hotdata
.skb_small_head_cache
= kmem_cache_create_usercopy("skbuff_small_head",
5122 SKB_SMALL_HEAD_CACHE_SIZE
,
5124 SLAB_HWCACHE_ALIGN
| SLAB_PANIC
,
5126 SKB_SMALL_HEAD_HEADROOM
,
5128 skb_extensions_init();
5132 __skb_to_sgvec(struct sk_buff
*skb
, struct scatterlist
*sg
, int offset
, int len
,
5133 unsigned int recursion_level
)
5135 int start
= skb_headlen(skb
);
5136 int i
, copy
= start
- offset
;
5137 struct sk_buff
*frag_iter
;
5140 if (unlikely(recursion_level
>= 24))
5146 sg_set_buf(sg
, skb
->data
+ offset
, copy
);
5148 if ((len
-= copy
) == 0)
5153 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
5156 WARN_ON(start
> offset
+ len
);
5158 end
= start
+ skb_frag_size(&skb_shinfo(skb
)->frags
[i
]);
5159 if ((copy
= end
- offset
) > 0) {
5160 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
5161 if (unlikely(elt
&& sg_is_last(&sg
[elt
- 1])))
5166 sg_set_page(&sg
[elt
], skb_frag_page(frag
), copy
,
5167 skb_frag_off(frag
) + offset
- start
);
5176 skb_walk_frags(skb
, frag_iter
) {
5179 WARN_ON(start
> offset
+ len
);
5181 end
= start
+ frag_iter
->len
;
5182 if ((copy
= end
- offset
) > 0) {
5183 if (unlikely(elt
&& sg_is_last(&sg
[elt
- 1])))
5188 ret
= __skb_to_sgvec(frag_iter
, sg
+elt
, offset
- start
,
5189 copy
, recursion_level
+ 1);
5190 if (unlikely(ret
< 0))
5193 if ((len
-= copy
) == 0)
5204 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
5205 * @skb: Socket buffer containing the buffers to be mapped
5206 * @sg: The scatter-gather list to map into
5207 * @offset: The offset into the buffer's contents to start mapping
5208 * @len: Length of buffer space to be mapped
5210 * Fill the specified scatter-gather list with mappings/pointers into a
5211 * region of the buffer space attached to a socket buffer. Returns either
5212 * the number of scatterlist items used, or -EMSGSIZE if the contents
5215 int skb_to_sgvec(struct sk_buff
*skb
, struct scatterlist
*sg
, int offset
, int len
)
5217 int nsg
= __skb_to_sgvec(skb
, sg
, offset
, len
, 0);
5222 sg_mark_end(&sg
[nsg
- 1]);
5226 EXPORT_SYMBOL_GPL(skb_to_sgvec
);
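
/* Illustrative sketch (not part of the original file): mapping an skb into a
 * scatterlist, e.g. before handing it to a crypto or DMA engine.  The
 * example_* name and the fixed on-stack table size are hypothetical; real
 * callers size the table from skb_cow_data() or MAX_SKB_FRAGS + 1.
 */
static int __maybe_unused example_map_skb(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	int nsg;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (nsg < 0)
		return nsg;	/* -EMSGSIZE: table too small */

	/* sg[0..nsg-1] now describe the linear part and all fragments. */
	return nsg;
}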
5228 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
5229 * sglist without mark the sg which contain last skb data as the end.
5230 * So the caller can mannipulate sg list as will when padding new data after
5231 * the first call without calling sg_unmark_end to expend sg list.
5233 * Scenario to use skb_to_sgvec_nomark:
5235 * 2. skb_to_sgvec_nomark(payload1)
5236 * 3. skb_to_sgvec_nomark(payload2)
5238 * This is equivalent to:
5240 * 2. skb_to_sgvec(payload1)
5242 * 4. skb_to_sgvec(payload2)
5244 * When mapping multiple payload conditionally, skb_to_sgvec_nomark
5245 * is more preferable.
5247 int skb_to_sgvec_nomark(struct sk_buff
*skb
, struct scatterlist
*sg
,
5248 int offset
, int len
)
5250 return __skb_to_sgvec(skb
, sg
, offset
, len
, 0);
5252 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark
);
5257 * skb_cow_data - Check that a socket buffer's data buffers are writable
5258 * @skb: The socket buffer to check.
5259 * @tailbits: Amount of trailing space to be added
5260 * @trailer: Returned pointer to the skb where the @tailbits space begins
5262 * Make sure that the data buffers attached to a socket buffer are
5263 * writable. If they are not, private copies are made of the data buffers
5264 * and the socket buffer is set to use these instead.
5266 * If @tailbits is given, make sure that there is space to write @tailbits
5267 * bytes of data beyond current end of socket buffer. @trailer will be
5268 * set to point to the skb in which this space begins.
5270 * The number of scatterlist elements required to completely map the
5271 * COW'd and extended socket buffer will be returned.
5273 int skb_cow_data(struct sk_buff
*skb
, int tailbits
, struct sk_buff
**trailer
)
5277 struct sk_buff
*skb1
, **skb_p
;
5279 /* If skb is cloned or its head is paged, reallocate
5280 * head pulling out all the pages (pages are considered not writable
5281 * at the moment even if they are anonymous).
5283 if ((skb_cloned(skb
) || skb_shinfo(skb
)->nr_frags
) &&
5284 !__pskb_pull_tail(skb
, __skb_pagelen(skb
)))
5287 /* Easy case. Most of packets will go this way. */
5288 if (!skb_has_frag_list(skb
)) {
5289 /* A little of trouble, not enough of space for trailer.
5290 * This should not happen, when stack is tuned to generate
5291 * good frames. OK, on miss we reallocate and reserve even more
5292 * space, 128 bytes is fair. */
5294 if (skb_tailroom(skb
) < tailbits
&&
5295 pskb_expand_head(skb
, 0, tailbits
-skb_tailroom(skb
)+128, GFP_ATOMIC
))
5303 /* Misery. We are in troubles, going to mincer fragments... */
5306 skb_p
= &skb_shinfo(skb
)->frag_list
;
5309 while ((skb1
= *skb_p
) != NULL
) {
5312 /* The fragment is partially pulled by someone,
5313 * this can happen on input. Copy it and everything
5316 if (skb_shared(skb1
))
5319 /* If the skb is the last, worry about trailer. */
5321 if (skb1
->next
== NULL
&& tailbits
) {
5322 if (skb_shinfo(skb1
)->nr_frags
||
5323 skb_has_frag_list(skb1
) ||
5324 skb_tailroom(skb1
) < tailbits
)
5325 ntail
= tailbits
+ 128;
5331 skb_shinfo(skb1
)->nr_frags
||
5332 skb_has_frag_list(skb1
)) {
5333 struct sk_buff
*skb2
;
5335 /* Fuck, we are miserable poor guys... */
5337 skb2
= skb_copy(skb1
, GFP_ATOMIC
);
5339 skb2
= skb_copy_expand(skb1
,
5343 if (unlikely(skb2
== NULL
))
5347 skb_set_owner_w(skb2
, skb1
->sk
);
5349 /* Looking around. Are we still alive?
5350 * OK, link new skb, drop old one */
5352 skb2
->next
= skb1
->next
;
5359 skb_p
= &skb1
->next
;
5364 EXPORT_SYMBOL_GPL(skb_cow_data
);
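
/* Illustrative sketch (not part of the original file): the typical
 * skb_cow_data() + skb_to_sgvec() pairing used by IPsec-style transforms that
 * need writable, fully mapped buffers.  example_cow_and_map() is hypothetical;
 * the ESP code is a real user of this pattern.
 */
static int __maybe_unused example_cow_and_map(struct sk_buff *skb, int tailbits)
{
	struct scatterlist *sg;
	struct sk_buff *trailer;
	int nsg, err;

	nsg = skb_cow_data(skb, tailbits, &trailer);
	if (nsg < 0)
		return nsg;

	sg = kmalloc_array(nsg, sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nsg);
	err = skb_to_sgvec(skb, sg, 0, skb->len);
	kfree(sg);
	return err;
}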
static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

static void skb_set_err_queue(struct sk_buff *skb)
{
	/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
	 * So, it is safe to (mis)use it to mark skbs on the error queue.
	 */
	skb->pkt_type = PACKET_OUTGOING;
	BUILD_BUG_ON(PACKET_OUTGOING == 0);
}
/*
 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
 */
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)READ_ONCE(sk->sk_rcvbuf))
		return -ENOMEM;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rmem_free;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	skb_set_err_queue(skb);

	/* before exiting rcu section, make sure dst is refcounted */
	skb_dst_force(skb);

	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk_error_report(sk);
	return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
static bool is_icmp_err_skb(const struct sk_buff *skb)
{
	return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
		       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
}
5413 struct sk_buff
*sock_dequeue_err_skb(struct sock
*sk
)
5415 struct sk_buff_head
*q
= &sk
->sk_error_queue
;
5416 struct sk_buff
*skb
, *skb_next
= NULL
;
5417 bool icmp_next
= false;
5418 unsigned long flags
;
5420 if (skb_queue_empty_lockless(q
))
5423 spin_lock_irqsave(&q
->lock
, flags
);
5424 skb
= __skb_dequeue(q
);
5425 if (skb
&& (skb_next
= skb_peek(q
))) {
5426 icmp_next
= is_icmp_err_skb(skb_next
);
5428 sk
->sk_err
= SKB_EXT_ERR(skb_next
)->ee
.ee_errno
;
5430 spin_unlock_irqrestore(&q
->lock
, flags
);
5432 if (is_icmp_err_skb(skb
) && !icmp_next
)
5436 sk_error_report(sk
);
5440 EXPORT_SYMBOL(sock_dequeue_err_skb
);
5443 * skb_clone_sk - create clone of skb, and take reference to socket
5444 * @skb: the skb to clone
5446 * This function creates a clone of a buffer that holds a reference on
5447 * sk_refcnt. Buffers created via this function are meant to be
5448 * returned using sock_queue_err_skb, or free via kfree_skb.
5450 * When passing buffers allocated with this function to sock_queue_err_skb
5451 * it is necessary to wrap the call with sock_hold/sock_put in order to
5452 * prevent the socket from being released prior to being enqueued on
5453 * the sk_error_queue.
5455 struct sk_buff
*skb_clone_sk(struct sk_buff
*skb
)
5457 struct sock
*sk
= skb
->sk
;
5458 struct sk_buff
*clone
;
5460 if (!sk
|| !refcount_inc_not_zero(&sk
->sk_refcnt
))
5463 clone
= skb_clone(skb
, GFP_ATOMIC
);
5470 clone
->destructor
= sock_efree
;
5474 EXPORT_SYMBOL(skb_clone_sk
);
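
/* Illustrative sketch (not part of the original file): returning a cloned skb
 * on the error queue with the sock_hold()/sock_put() pairing that the
 * skb_clone_sk() comment above asks for.  example_return_to_errqueue() is
 * hypothetical; the timestamp completion helpers below follow the same idea.
 */
static void __maybe_unused example_return_to_errqueue(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct sk_buff *clone;

	clone = skb_clone_sk(skb);
	if (!clone)
		return;

	sock_hold(sk);
	if (sock_queue_err_skb(sk, clone))
		kfree_skb(clone);
	sock_put(sk);
}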
5476 static void __skb_complete_tx_timestamp(struct sk_buff
*skb
,
5481 struct sock_exterr_skb
*serr
;
5484 BUILD_BUG_ON(sizeof(struct sock_exterr_skb
) > sizeof(skb
->cb
));
5486 serr
= SKB_EXT_ERR(skb
);
5487 memset(serr
, 0, sizeof(*serr
));
5488 serr
->ee
.ee_errno
= ENOMSG
;
5489 serr
->ee
.ee_origin
= SO_EE_ORIGIN_TIMESTAMPING
;
5490 serr
->ee
.ee_info
= tstype
;
5491 serr
->opt_stats
= opt_stats
;
5492 serr
->header
.h4
.iif
= skb
->dev
? skb
->dev
->ifindex
: 0;
5493 if (READ_ONCE(sk
->sk_tsflags
) & SOF_TIMESTAMPING_OPT_ID
) {
5494 serr
->ee
.ee_data
= skb_shinfo(skb
)->tskey
;
5496 serr
->ee
.ee_data
-= atomic_read(&sk
->sk_tskey
);
5499 err
= sock_queue_err_skb(sk
, skb
);
5505 static bool skb_may_tx_timestamp(struct sock
*sk
, bool tsonly
)
5509 if (likely(tsonly
|| READ_ONCE(sock_net(sk
)->core
.sysctl_tstamp_allow_data
)))
5512 read_lock_bh(&sk
->sk_callback_lock
);
5513 ret
= sk
->sk_socket
&& sk
->sk_socket
->file
&&
5514 file_ns_capable(sk
->sk_socket
->file
, &init_user_ns
, CAP_NET_RAW
);
5515 read_unlock_bh(&sk
->sk_callback_lock
);
5519 void skb_complete_tx_timestamp(struct sk_buff
*skb
,
5520 struct skb_shared_hwtstamps
*hwtstamps
)
5522 struct sock
*sk
= skb
->sk
;
5524 if (!skb_may_tx_timestamp(sk
, false))
5527 /* Take a reference to prevent skb_orphan() from freeing the socket,
5528 * but only if the socket refcount is not zero.
5530 if (likely(refcount_inc_not_zero(&sk
->sk_refcnt
))) {
5531 *skb_hwtstamps(skb
) = *hwtstamps
;
5532 __skb_complete_tx_timestamp(skb
, sk
, SCM_TSTAMP_SND
, false);
5540 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp
);
5542 void __skb_tstamp_tx(struct sk_buff
*orig_skb
,
5543 const struct sk_buff
*ack_skb
,
5544 struct skb_shared_hwtstamps
*hwtstamps
,
5545 struct sock
*sk
, int tstype
)
5547 struct sk_buff
*skb
;
5548 bool tsonly
, opt_stats
= false;
5554 tsflags
= READ_ONCE(sk
->sk_tsflags
);
5555 if (!hwtstamps
&& !(tsflags
& SOF_TIMESTAMPING_OPT_TX_SWHW
) &&
5556 skb_shinfo(orig_skb
)->tx_flags
& SKBTX_IN_PROGRESS
)
5559 tsonly
= tsflags
& SOF_TIMESTAMPING_OPT_TSONLY
;
5560 if (!skb_may_tx_timestamp(sk
, tsonly
))
5565 if ((tsflags
& SOF_TIMESTAMPING_OPT_STATS
) &&
5567 skb
= tcp_get_timestamping_opt_stats(sk
, orig_skb
,
5572 skb
= alloc_skb(0, GFP_ATOMIC
);
5574 skb
= skb_clone(orig_skb
, GFP_ATOMIC
);
5576 if (skb_orphan_frags_rx(skb
, GFP_ATOMIC
)) {
5585 skb_shinfo(skb
)->tx_flags
|= skb_shinfo(orig_skb
)->tx_flags
&
5587 skb_shinfo(skb
)->tskey
= skb_shinfo(orig_skb
)->tskey
;
5591 *skb_hwtstamps(skb
) = *hwtstamps
;
5593 __net_timestamp(skb
);
5595 __skb_complete_tx_timestamp(skb
, sk
, tstype
, opt_stats
);
5597 EXPORT_SYMBOL_GPL(__skb_tstamp_tx
);
5599 void skb_tstamp_tx(struct sk_buff
*orig_skb
,
5600 struct skb_shared_hwtstamps
*hwtstamps
)
5602 return __skb_tstamp_tx(orig_skb
, NULL
, hwtstamps
, orig_skb
->sk
,
5605 EXPORT_SYMBOL_GPL(skb_tstamp_tx
);
5607 #ifdef CONFIG_WIRELESS
5608 void skb_complete_wifi_ack(struct sk_buff
*skb
, bool acked
)
5610 struct sock
*sk
= skb
->sk
;
5611 struct sock_exterr_skb
*serr
;
5614 skb
->wifi_acked_valid
= 1;
5615 skb
->wifi_acked
= acked
;
5617 serr
= SKB_EXT_ERR(skb
);
5618 memset(serr
, 0, sizeof(*serr
));
5619 serr
->ee
.ee_errno
= ENOMSG
;
5620 serr
->ee
.ee_origin
= SO_EE_ORIGIN_TXSTATUS
;
5622 /* Take a reference to prevent skb_orphan() from freeing the socket,
5623 * but only if the socket refcount is not zero.
5625 if (likely(refcount_inc_not_zero(&sk
->sk_refcnt
))) {
5626 err
= sock_queue_err_skb(sk
, skb
);
5632 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack
);
5633 #endif /* CONFIG_WIRELESS */
/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
	u32 csum_start = skb_headroom(skb) + (u32)start;

	if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) {
		net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
				     start, off, skb_headroom(skb), skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = csum_start;
	skb->csum_offset = off;
	skb->transport_header = csum_start;
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);
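
/* Illustrative sketch (not part of the original file): validating checksum
 * metadata supplied by an untrusted source before accepting the packet.
 * example_set_partial_csum() is hypothetical.
 */
static int __maybe_unused example_set_partial_csum(struct sk_buff *skb,
						   u16 csum_start,
						   u16 csum_offset)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
		return -EINVAL;	/* caller should drop the packet */

	/* skb->ip_summed is now CHECKSUM_PARTIAL and csum_start/csum_offset
	 * are known to lie within the linear area.
	 */
	return 0;
}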
5665 static int skb_maybe_pull_tail(struct sk_buff
*skb
, unsigned int len
,
5668 if (skb_headlen(skb
) >= len
)
5671 /* If we need to pullup then pullup to the max, so we
5672 * won't need to do it again.
5677 if (__pskb_pull_tail(skb
, max
- skb_headlen(skb
)) == NULL
)
5680 if (skb_headlen(skb
) < len
)
5686 #define MAX_TCP_HDR_LEN (15 * 4)
5688 static __sum16
*skb_checksum_setup_ip(struct sk_buff
*skb
,
5689 typeof(IPPROTO_IP
) proto
,
5696 err
= skb_maybe_pull_tail(skb
, off
+ sizeof(struct tcphdr
),
5697 off
+ MAX_TCP_HDR_LEN
);
5698 if (!err
&& !skb_partial_csum_set(skb
, off
,
5699 offsetof(struct tcphdr
,
5702 return err
? ERR_PTR(err
) : &tcp_hdr(skb
)->check
;
5705 err
= skb_maybe_pull_tail(skb
, off
+ sizeof(struct udphdr
),
5706 off
+ sizeof(struct udphdr
));
5707 if (!err
&& !skb_partial_csum_set(skb
, off
,
5708 offsetof(struct udphdr
,
5711 return err
? ERR_PTR(err
) : &udp_hdr(skb
)->check
;
5714 return ERR_PTR(-EPROTO
);
5717 /* This value should be large enough to cover a tagged ethernet header plus
5718 * maximally sized IP and TCP or UDP headers.
5720 #define MAX_IP_HDR_LEN 128
5722 static int skb_checksum_setup_ipv4(struct sk_buff
*skb
, bool recalculate
)
5731 err
= skb_maybe_pull_tail(skb
,
5732 sizeof(struct iphdr
),
5737 if (ip_is_fragment(ip_hdr(skb
)))
5740 off
= ip_hdrlen(skb
);
5747 csum
= skb_checksum_setup_ip(skb
, ip_hdr(skb
)->protocol
, off
);
5749 return PTR_ERR(csum
);
5752 *csum
= ~csum_tcpudp_magic(ip_hdr(skb
)->saddr
,
5755 ip_hdr(skb
)->protocol
, 0);
5762 /* This value should be large enough to cover a tagged ethernet header plus
5763 * an IPv6 header, all options, and a maximal TCP or UDP header.
5765 #define MAX_IPV6_HDR_LEN 256
5767 #define OPT_HDR(type, skb, off) \
5768 (type *)(skb_network_header(skb) + (off))
5770 static int skb_checksum_setup_ipv6(struct sk_buff
*skb
, bool recalculate
)
5783 off
= sizeof(struct ipv6hdr
);
5785 err
= skb_maybe_pull_tail(skb
, off
, MAX_IPV6_HDR_LEN
);
5789 nexthdr
= ipv6_hdr(skb
)->nexthdr
;
5791 len
= sizeof(struct ipv6hdr
) + ntohs(ipv6_hdr(skb
)->payload_len
);
5792 while (off
<= len
&& !done
) {
5794 case IPPROTO_DSTOPTS
:
5795 case IPPROTO_HOPOPTS
:
5796 case IPPROTO_ROUTING
: {
5797 struct ipv6_opt_hdr
*hp
;
5799 err
= skb_maybe_pull_tail(skb
,
5801 sizeof(struct ipv6_opt_hdr
),
5806 hp
= OPT_HDR(struct ipv6_opt_hdr
, skb
, off
);
5807 nexthdr
= hp
->nexthdr
;
5808 off
+= ipv6_optlen(hp
);
5812 struct ip_auth_hdr
*hp
;
5814 err
= skb_maybe_pull_tail(skb
,
5816 sizeof(struct ip_auth_hdr
),
5821 hp
= OPT_HDR(struct ip_auth_hdr
, skb
, off
);
5822 nexthdr
= hp
->nexthdr
;
5823 off
+= ipv6_authlen(hp
);
5826 case IPPROTO_FRAGMENT
: {
5827 struct frag_hdr
*hp
;
5829 err
= skb_maybe_pull_tail(skb
,
5831 sizeof(struct frag_hdr
),
5836 hp
= OPT_HDR(struct frag_hdr
, skb
, off
);
5838 if (hp
->frag_off
& htons(IP6_OFFSET
| IP6_MF
))
5841 nexthdr
= hp
->nexthdr
;
5842 off
+= sizeof(struct frag_hdr
);
5853 if (!done
|| fragment
)
5856 csum
= skb_checksum_setup_ip(skb
, nexthdr
, off
);
5858 return PTR_ERR(csum
);
5861 *csum
= ~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
5862 &ipv6_hdr(skb
)->daddr
,
5863 skb
->len
- off
, nexthdr
, 0);
5871 * skb_checksum_setup - set up partial checksum offset
5872 * @skb: the skb to set up
5873 * @recalculate: if true the pseudo-header checksum will be recalculated
5875 int skb_checksum_setup(struct sk_buff
*skb
, bool recalculate
)
5879 switch (skb
->protocol
) {
5880 case htons(ETH_P_IP
):
5881 err
= skb_checksum_setup_ipv4(skb
, recalculate
);
5884 case htons(ETH_P_IPV6
):
5885 err
= skb_checksum_setup_ipv6(skb
, recalculate
);
5895 EXPORT_SYMBOL(skb_checksum_setup
);
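
/* Illustrative sketch (not part of the original file): a backend that receives
 * partially-checksummed packets from a guest might fix them up as below before
 * passing them up the stack.  example_fixup_csum() is hypothetical; the
 * recalculate=true choice (redoing the pseudo-header checksum) is only one
 * possible policy.
 */
static int __maybe_unused example_fixup_csum(struct sk_buff *skb)
{
	int err;

	err = skb_checksum_setup(skb, true);
	if (err) {
		kfree_skb(skb);
		return err;
	}
	return 0;
}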
5898 * skb_checksum_maybe_trim - maybe trims the given skb
5899 * @skb: the skb to check
5900 * @transport_len: the data length beyond the network header
5902 * Checks whether the given skb has data beyond the given transport length.
5903 * If so, returns a cloned skb trimmed to this transport length.
5904 * Otherwise returns the provided skb. Returns NULL in error cases
5905 * (e.g. transport_len exceeds skb length or out-of-memory).
5907 * Caller needs to set the skb transport header and free any returned skb if it
5908 * differs from the provided skb.
5910 static struct sk_buff
*skb_checksum_maybe_trim(struct sk_buff
*skb
,
5911 unsigned int transport_len
)
5913 struct sk_buff
*skb_chk
;
5914 unsigned int len
= skb_transport_offset(skb
) + transport_len
;
5919 else if (skb
->len
== len
)
5922 skb_chk
= skb_clone(skb
, GFP_ATOMIC
);
5926 ret
= pskb_trim_rcsum(skb_chk
, len
);
5936 * skb_checksum_trimmed - validate checksum of an skb
5937 * @skb: the skb to check
5938 * @transport_len: the data length beyond the network header
5939 * @skb_chkf: checksum function to use
5941 * Applies the given checksum function skb_chkf to the provided skb.
5942 * Returns a checked and maybe trimmed skb. Returns NULL on error.
5944 * If the skb has data beyond the given transport length, then a
5945 * trimmed & cloned skb is checked and returned.
5947 * Caller needs to set the skb transport header and free any returned skb if it
5948 * differs from the provided skb.
5950 struct sk_buff
*skb_checksum_trimmed(struct sk_buff
*skb
,
5951 unsigned int transport_len
,
5952 __sum16(*skb_chkf
)(struct sk_buff
*skb
))
5954 struct sk_buff
*skb_chk
;
5955 unsigned int offset
= skb_transport_offset(skb
);
5958 skb_chk
= skb_checksum_maybe_trim(skb
, transport_len
);
5962 if (!pskb_may_pull(skb_chk
, offset
))
5965 skb_pull_rcsum(skb_chk
, offset
);
5966 ret
= skb_chkf(skb_chk
);
5967 skb_push_rcsum(skb_chk
, offset
);
5975 if (skb_chk
&& skb_chk
!= skb
)
5981 EXPORT_SYMBOL(skb_checksum_trimmed
);
5983 void __skb_warn_lro_forwarding(const struct sk_buff
*skb
)
5985 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
5988 EXPORT_SYMBOL(__skb_warn_lro_forwarding
);
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(net_hotdata.skbuff_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);
6002 * skb_try_coalesce - try to merge skb to prior one
6004 * @from: buffer to add
6005 * @fragstolen: pointer to boolean
6006 * @delta_truesize: how much more was allocated than was requested
6008 bool skb_try_coalesce(struct sk_buff
*to
, struct sk_buff
*from
,
6009 bool *fragstolen
, int *delta_truesize
)
6011 struct skb_shared_info
*to_shinfo
, *from_shinfo
;
6012 int i
, delta
, len
= from
->len
;
6014 *fragstolen
= false;
6019 /* In general, avoid mixing page_pool and non-page_pool allocated
6020 * pages within the same SKB. In theory we could take full
6021 * references if @from is cloned and !@to->pp_recycle but its
6022 * tricky (due to potential race with the clone disappearing) and
6023 * rare, so not worth dealing with.
6025 if (to
->pp_recycle
!= from
->pp_recycle
)
6028 if (skb_frags_readable(from
) != skb_frags_readable(to
))
6031 if (len
<= skb_tailroom(to
) && skb_frags_readable(from
)) {
6033 BUG_ON(skb_copy_bits(from
, 0, skb_put(to
, len
), len
));
6034 *delta_truesize
= 0;
6038 to_shinfo
= skb_shinfo(to
);
6039 from_shinfo
= skb_shinfo(from
);
6040 if (to_shinfo
->frag_list
|| from_shinfo
->frag_list
)
6042 if (skb_zcopy(to
) || skb_zcopy(from
))
6045 if (skb_headlen(from
) != 0) {
6047 unsigned int offset
;
6049 if (to_shinfo
->nr_frags
+
6050 from_shinfo
->nr_frags
>= MAX_SKB_FRAGS
)
6053 if (skb_head_is_locked(from
))
6056 delta
= from
->truesize
- SKB_DATA_ALIGN(sizeof(struct sk_buff
));
6058 page
= virt_to_head_page(from
->head
);
6059 offset
= from
->data
- (unsigned char *)page_address(page
);
6061 skb_fill_page_desc(to
, to_shinfo
->nr_frags
,
6062 page
, offset
, skb_headlen(from
));
6065 if (to_shinfo
->nr_frags
+
6066 from_shinfo
->nr_frags
> MAX_SKB_FRAGS
)
6069 delta
= from
->truesize
- SKB_TRUESIZE(skb_end_offset(from
));
6072 WARN_ON_ONCE(delta
< len
);
6074 memcpy(to_shinfo
->frags
+ to_shinfo
->nr_frags
,
6076 from_shinfo
->nr_frags
* sizeof(skb_frag_t
));
6077 to_shinfo
->nr_frags
+= from_shinfo
->nr_frags
;
6079 if (!skb_cloned(from
))
6080 from_shinfo
->nr_frags
= 0;
6082 /* if the skb is not cloned this does nothing
6083 * since we set nr_frags to 0.
6085 if (skb_pp_frag_ref(from
)) {
6086 for (i
= 0; i
< from_shinfo
->nr_frags
; i
++)
6087 __skb_frag_ref(&from_shinfo
->frags
[i
]);
6090 to
->truesize
+= delta
;
6092 to
->data_len
+= len
;
6094 *delta_truesize
= delta
;
6097 EXPORT_SYMBOL(skb_try_coalesce
);
6100 * skb_scrub_packet - scrub an skb
6102 * @skb: buffer to clean
6103 * @xnet: packet is crossing netns
6105 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
6106 * into/from a tunnel. Some information have to be cleared during these
6108 * skb_scrub_packet can also be used to clean a skb before injecting it in
6109 * another namespace (@xnet == true). We have to clear all information in the
6110 * skb that could impact namespace isolation.
6112 void skb_scrub_packet(struct sk_buff
*skb
, bool xnet
)
6114 skb
->pkt_type
= PACKET_HOST
;
6120 nf_reset_trace(skb
);
6122 #ifdef CONFIG_NET_SWITCHDEV
6123 skb
->offload_fwd_mark
= 0;
6124 skb
->offload_l3_fwd_mark
= 0;
6132 skb_clear_tstamp(skb
);
6134 EXPORT_SYMBOL_GPL(skb_scrub_packet
);
6136 static struct sk_buff
*skb_reorder_vlan_header(struct sk_buff
*skb
)
6138 int mac_len
, meta_len
;
6141 if (skb_cow(skb
, skb_headroom(skb
)) < 0) {
6146 mac_len
= skb
->data
- skb_mac_header(skb
);
6147 if (likely(mac_len
> VLAN_HLEN
+ ETH_TLEN
)) {
6148 memmove(skb_mac_header(skb
) + VLAN_HLEN
, skb_mac_header(skb
),
6149 mac_len
- VLAN_HLEN
- ETH_TLEN
);
6152 meta_len
= skb_metadata_len(skb
);
6154 meta
= skb_metadata_end(skb
) - meta_len
;
6155 memmove(meta
+ VLAN_HLEN
, meta
, meta_len
);
6158 skb
->mac_header
+= VLAN_HLEN
;
6162 struct sk_buff
*skb_vlan_untag(struct sk_buff
*skb
)
6164 struct vlan_hdr
*vhdr
;
6167 if (unlikely(skb_vlan_tag_present(skb
))) {
6168 /* vlan_tci is already set-up so leave this for another time */
6172 skb
= skb_share_check(skb
, GFP_ATOMIC
);
6175 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
6176 if (unlikely(!pskb_may_pull(skb
, VLAN_HLEN
+ sizeof(unsigned short))))
6179 vhdr
= (struct vlan_hdr
*)skb
->data
;
6180 vlan_tci
= ntohs(vhdr
->h_vlan_TCI
);
6181 __vlan_hwaccel_put_tag(skb
, skb
->protocol
, vlan_tci
);
6183 skb_pull_rcsum(skb
, VLAN_HLEN
);
6184 vlan_set_encap_proto(skb
, vhdr
);
6186 skb
= skb_reorder_vlan_header(skb
);
6190 skb_reset_network_header(skb
);
6191 if (!skb_transport_header_was_set(skb
))
6192 skb_reset_transport_header(skb
);
6193 skb_reset_mac_len(skb
);
6201 EXPORT_SYMBOL(skb_vlan_untag
);
6203 int skb_ensure_writable(struct sk_buff
*skb
, unsigned int write_len
)
6205 if (!pskb_may_pull(skb
, write_len
))
6208 if (!skb_frags_readable(skb
))
6211 if (!skb_cloned(skb
) || skb_clone_writable(skb
, write_len
))
6214 return pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
);
6216 EXPORT_SYMBOL(skb_ensure_writable
);
6218 int skb_ensure_writable_head_tail(struct sk_buff
*skb
, struct net_device
*dev
)
6220 int needed_headroom
= dev
->needed_headroom
;
6221 int needed_tailroom
= dev
->needed_tailroom
;
6223 /* For tail taggers, we need to pad short frames ourselves, to ensure
6224 * that the tail tag does not fail at its role of being at the end of
6225 * the packet, once the conduit interface pads the frame. Account for
6226 * that pad length here, and pad later.
6228 if (unlikely(needed_tailroom
&& skb
->len
< ETH_ZLEN
))
6229 needed_tailroom
+= ETH_ZLEN
- skb
->len
;
6230 /* skb_headroom() returns unsigned int... */
6231 needed_headroom
= max_t(int, needed_headroom
- skb_headroom(skb
), 0);
6232 needed_tailroom
= max_t(int, needed_tailroom
- skb_tailroom(skb
), 0);
6234 if (likely(!needed_headroom
&& !needed_tailroom
&& !skb_cloned(skb
)))
6235 /* No reallocation needed, yay! */
6238 return pskb_expand_head(skb
, needed_headroom
, needed_tailroom
,
6241 EXPORT_SYMBOL(skb_ensure_writable_head_tail
);
6243 /* remove VLAN header from packet and update csum accordingly.
6244 * expects a non skb_vlan_tag_present skb with a vlan tag payload
6246 int __skb_vlan_pop(struct sk_buff
*skb
, u16
*vlan_tci
)
6248 int offset
= skb
->data
- skb_mac_header(skb
);
6251 if (WARN_ONCE(offset
,
6252 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
6257 err
= skb_ensure_writable(skb
, VLAN_ETH_HLEN
);
6261 skb_postpull_rcsum(skb
, skb
->data
+ (2 * ETH_ALEN
), VLAN_HLEN
);
6263 vlan_remove_tag(skb
, vlan_tci
);
6265 skb
->mac_header
+= VLAN_HLEN
;
6267 if (skb_network_offset(skb
) < ETH_HLEN
)
6268 skb_set_network_header(skb
, ETH_HLEN
);
6270 skb_reset_mac_len(skb
);
6274 EXPORT_SYMBOL(__skb_vlan_pop
);
6276 /* Pop a vlan tag either from hwaccel or from payload.
6277 * Expects skb->data at mac header.
6279 int skb_vlan_pop(struct sk_buff
*skb
)
6285 if (likely(skb_vlan_tag_present(skb
))) {
6286 __vlan_hwaccel_clear_tag(skb
);
6288 if (unlikely(!eth_type_vlan(skb
->protocol
)))
6291 err
= __skb_vlan_pop(skb
, &vlan_tci
);
6295 /* move next vlan tag to hw accel tag */
6296 if (likely(!eth_type_vlan(skb
->protocol
)))
6299 vlan_proto
= skb
->protocol
;
6300 err
= __skb_vlan_pop(skb
, &vlan_tci
);
6304 __vlan_hwaccel_put_tag(skb
, vlan_proto
, vlan_tci
);
6307 EXPORT_SYMBOL(skb_vlan_pop
);
6309 /* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
6310 * Expects skb->data at mac header.
6312 int skb_vlan_push(struct sk_buff
*skb
, __be16 vlan_proto
, u16 vlan_tci
)
6314 if (skb_vlan_tag_present(skb
)) {
6315 int offset
= skb
->data
- skb_mac_header(skb
);
6318 if (WARN_ONCE(offset
,
6319 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
6324 err
= __vlan_insert_tag(skb
, skb
->vlan_proto
,
6325 skb_vlan_tag_get(skb
));
6329 skb
->protocol
= skb
->vlan_proto
;
6330 skb
->network_header
-= VLAN_HLEN
;
6332 skb_postpush_rcsum(skb
, skb
->data
+ (2 * ETH_ALEN
), VLAN_HLEN
);
6334 __vlan_hwaccel_put_tag(skb
, vlan_proto
, vlan_tci
);
6337 EXPORT_SYMBOL(skb_vlan_push
);
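
/* Illustrative sketch (not part of the original file): popping and re-pushing
 * an 802.1Q tag the way an openvswitch or tc action would, with skb->data
 * expected to sit at the mac header.  example_retag() is hypothetical.
 */
static int __maybe_unused example_retag(struct sk_buff *skb, u16 new_vid)
{
	int err;

	err = skb_vlan_pop(skb);
	if (err)
		return err;

	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}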
/**
 * skb_eth_pop() - Drop the Ethernet header at the head of a packet
 *
 * @skb: Socket buffer to modify
 *
 * Drop the Ethernet header of @skb.
 *
 * Expects that skb->data points to the mac header and that no VLAN tags are
 * present.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_eth_pop(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
	    skb_network_offset(skb) < ETH_HLEN)
		return -EPROTO;

	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return 0;
}
EXPORT_SYMBOL(skb_eth_pop);
6366 * skb_eth_push() - Add a new Ethernet header at the head of a packet
6368 * @skb: Socket buffer to modify
6369 * @dst: Destination MAC address of the new header
6370 * @src: Source MAC address of the new header
6372 * Prepend @skb with a new Ethernet header.
6374 * Expects that skb->data points to the mac header, which must be empty.
6376 * Returns 0 on success, -errno otherwise.
6378 int skb_eth_push(struct sk_buff
*skb
, const unsigned char *dst
,
6379 const unsigned char *src
)
6384 if (skb_network_offset(skb
) || skb_vlan_tag_present(skb
))
6387 err
= skb_cow_head(skb
, sizeof(*eth
));
6391 skb_push(skb
, sizeof(*eth
));
6392 skb_reset_mac_header(skb
);
6393 skb_reset_mac_len(skb
);
6396 ether_addr_copy(eth
->h_dest
, dst
);
6397 ether_addr_copy(eth
->h_source
, src
);
6398 eth
->h_proto
= skb
->protocol
;
6400 skb_postpush_rcsum(skb
, eth
, sizeof(*eth
));
6404 EXPORT_SYMBOL(skb_eth_push
);
/* Update the ethertype of hdr and the skb csum value if required. */
static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~hdr->h_proto, ethertype };

		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}

	hdr->h_proto = ethertype;
}
6420 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
6424 * @mpls_lse: MPLS label stack entry to push
6425 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
6426 * @mac_len: length of the MAC header
6427 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
6430 * Expects skb->data at mac header.
6432 * Returns 0 on success, -errno otherwise.
6434 int skb_mpls_push(struct sk_buff
*skb
, __be32 mpls_lse
, __be16 mpls_proto
,
6435 int mac_len
, bool ethernet
)
6437 struct mpls_shim_hdr
*lse
;
6440 if (unlikely(!eth_p_mpls(mpls_proto
)))
6443 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
6444 if (skb
->encapsulation
)
6447 err
= skb_cow_head(skb
, MPLS_HLEN
);
6451 if (!skb
->inner_protocol
) {
6452 skb_set_inner_network_header(skb
, skb_network_offset(skb
));
6453 skb_set_inner_protocol(skb
, skb
->protocol
);
6456 skb_push(skb
, MPLS_HLEN
);
6457 memmove(skb_mac_header(skb
) - MPLS_HLEN
, skb_mac_header(skb
),
6459 skb_reset_mac_header(skb
);
6460 skb_set_network_header(skb
, mac_len
);
6461 skb_reset_mac_len(skb
);
6463 lse
= mpls_hdr(skb
);
6464 lse
->label_stack_entry
= mpls_lse
;
6465 skb_postpush_rcsum(skb
, lse
, MPLS_HLEN
);
6467 if (ethernet
&& mac_len
>= ETH_HLEN
)
6468 skb_mod_eth_type(skb
, eth_hdr(skb
), mpls_proto
);
6469 skb
->protocol
= mpls_proto
;
6473 EXPORT_SYMBOL_GPL(skb_mpls_push
);
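
/* Illustrative sketch (not part of the original file): imposing one MPLS label
 * on an Ethernet frame, as an openvswitch/tc action would.  The label-word
 * construction is open-coded here and example_push_label() is hypothetical.
 */
static int __maybe_unused example_push_label(struct sk_buff *skb, u32 label)
{
	__be32 lse = cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) |
				 (255 << MPLS_LS_TTL_SHIFT) |
				 (1 << MPLS_LS_S_SHIFT));	/* bottom of stack */

	return skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
			     skb->mac_len, true);
}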
6476 * skb_mpls_pop() - pop the outermost MPLS header
6479 * @next_proto: ethertype of header after popped MPLS header
6480 * @mac_len: length of the MAC header
6481 * @ethernet: flag to indicate if the packet is ethernet
6483 * Expects skb->data at mac header.
6485 * Returns 0 on success, -errno otherwise.
6487 int skb_mpls_pop(struct sk_buff
*skb
, __be16 next_proto
, int mac_len
,
6492 if (unlikely(!eth_p_mpls(skb
->protocol
)))
6495 err
= skb_ensure_writable(skb
, mac_len
+ MPLS_HLEN
);
6499 skb_postpull_rcsum(skb
, mpls_hdr(skb
), MPLS_HLEN
);
6500 memmove(skb_mac_header(skb
) + MPLS_HLEN
, skb_mac_header(skb
),
6503 __skb_pull(skb
, MPLS_HLEN
);
6504 skb_reset_mac_header(skb
);
6505 skb_set_network_header(skb
, mac_len
);
6507 if (ethernet
&& mac_len
>= ETH_HLEN
) {
6510 /* use mpls_hdr() to get ethertype to account for VLANs. */
6511 hdr
= (struct ethhdr
*)((void *)mpls_hdr(skb
) - ETH_HLEN
);
6512 skb_mod_eth_type(skb
, hdr
, next_proto
);
6514 skb
->protocol
= next_proto
;
6518 EXPORT_SYMBOL_GPL(skb_mpls_pop
);
6521 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
6524 * @mpls_lse: new MPLS label stack entry to update to
6526 * Expects skb->data at mac header.
6528 * Returns 0 on success, -errno otherwise.
6530 int skb_mpls_update_lse(struct sk_buff
*skb
, __be32 mpls_lse
)
6534 if (unlikely(!eth_p_mpls(skb
->protocol
)))
6537 err
= skb_ensure_writable(skb
, skb
->mac_len
+ MPLS_HLEN
);
6541 if (skb
->ip_summed
== CHECKSUM_COMPLETE
) {
6542 __be32 diff
[] = { ~mpls_hdr(skb
)->label_stack_entry
, mpls_lse
};
6544 skb
->csum
= csum_partial((char *)diff
, sizeof(diff
), skb
->csum
);
6547 mpls_hdr(skb
)->label_stack_entry
= mpls_lse
;
6551 EXPORT_SYMBOL_GPL(skb_mpls_update_lse
);
6554 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
6558 * Expects skb->data at mac header.
6560 * Returns 0 on success, -errno otherwise.
6562 int skb_mpls_dec_ttl(struct sk_buff
*skb
)
6567 if (unlikely(!eth_p_mpls(skb
->protocol
)))
6570 if (!pskb_may_pull(skb
, skb_network_offset(skb
) + MPLS_HLEN
))
6573 lse
= be32_to_cpu(mpls_hdr(skb
)->label_stack_entry
);
6574 ttl
= (lse
& MPLS_LS_TTL_MASK
) >> MPLS_LS_TTL_SHIFT
;
6578 lse
&= ~MPLS_LS_TTL_MASK
;
6579 lse
|= ttl
<< MPLS_LS_TTL_SHIFT
;
6581 return skb_mpls_update_lse(skb
, cpu_to_be32(lse
));
6583 EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl
);
6586 * alloc_skb_with_frags - allocate skb with page frags
6588 * @header_len: size of linear part
6589 * @data_len: needed length in frags
6590 * @order: max page order desired.
6591 * @errcode: pointer to error code if any
6592 * @gfp_mask: allocation mask
6594 * This can be used to allocate a paged skb, given a maximal order for frags.
6596 struct sk_buff
*alloc_skb_with_frags(unsigned long header_len
,
6597 unsigned long data_len
,
6602 unsigned long chunk
;
6603 struct sk_buff
*skb
;
6607 *errcode
= -EMSGSIZE
;
6608 if (unlikely(data_len
> MAX_SKB_FRAGS
* (PAGE_SIZE
<< order
)))
6611 *errcode
= -ENOBUFS
;
6612 skb
= alloc_skb(header_len
, gfp_mask
);
6617 if (nr_frags
== MAX_SKB_FRAGS
- 1)
6619 while (order
&& PAGE_ALIGN(data_len
) < (PAGE_SIZE
<< order
))
6623 page
= alloc_pages((gfp_mask
& ~__GFP_DIRECT_RECLAIM
) |
6632 page
= alloc_page(gfp_mask
);
6636 chunk
= min_t(unsigned long, data_len
,
6637 PAGE_SIZE
<< order
);
6638 skb_fill_page_desc(skb
, nr_frags
, page
, 0, chunk
);
6640 skb
->truesize
+= (PAGE_SIZE
<< order
);
6649 EXPORT_SYMBOL(alloc_skb_with_frags
);
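
/* Illustrative sketch (not part of the original file): allocating a large,
 * mostly-paged skb in the style of sock_alloc_send_pskb() callers.
 * example_alloc_big_skb() is hypothetical; returning ERR_PTR() here is just a
 * convenience for this sketch.
 */
static __maybe_unused struct sk_buff *
example_alloc_big_skb(size_t headroom, size_t payload)
{
	struct sk_buff *skb;
	int errcode;

	/* Order 0: build the paged part out of single pages only. */
	skb = alloc_skb_with_frags(headroom, payload, 0, &errcode, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(errcode);

	skb_reserve(skb, headroom);
	return skb;
}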
6651 /* carve out the first off bytes from skb when off < headlen */
6652 static int pskb_carve_inside_header(struct sk_buff
*skb
, const u32 off
,
6653 const int headlen
, gfp_t gfp_mask
)
6656 unsigned int size
= skb_end_offset(skb
);
6657 int new_hlen
= headlen
- off
;
6660 if (skb_pfmemalloc(skb
))
6661 gfp_mask
|= __GFP_MEMALLOC
;
6663 data
= kmalloc_reserve(&size
, gfp_mask
, NUMA_NO_NODE
, NULL
);
6666 size
= SKB_WITH_OVERHEAD(size
);
6668 /* Copy real data, and all frags */
6669 skb_copy_from_linear_data_offset(skb
, off
, data
, new_hlen
);
6672 memcpy((struct skb_shared_info
*)(data
+ size
),
6674 offsetof(struct skb_shared_info
,
6675 frags
[skb_shinfo(skb
)->nr_frags
]));
6676 if (skb_cloned(skb
)) {
6677 /* drop the old head gracefully */
6678 if (skb_orphan_frags(skb
, gfp_mask
)) {
6679 skb_kfree_head(data
, size
);
6682 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++)
6683 skb_frag_ref(skb
, i
);
6684 if (skb_has_frag_list(skb
))
6685 skb_clone_fraglist(skb
);
6686 skb_release_data(skb
, SKB_CONSUMED
);
6688 /* we can reuse existing recount- all we did was
6697 skb_set_end_offset(skb
, size
);
6698 skb_set_tail_pointer(skb
, skb_headlen(skb
));
6699 skb_headers_offset_update(skb
, 0);
6703 atomic_set(&skb_shinfo(skb
)->dataref
, 1);
6708 static int pskb_carve(struct sk_buff
*skb
, const u32 off
, gfp_t gfp
);
6710 /* carve out the first eat bytes from skb's frag_list. May recurse into
6713 static int pskb_carve_frag_list(struct sk_buff
*skb
,
6714 struct skb_shared_info
*shinfo
, int eat
,
6717 struct sk_buff
*list
= shinfo
->frag_list
;
6718 struct sk_buff
*clone
= NULL
;
6719 struct sk_buff
*insp
= NULL
;
6723 pr_err("Not enough bytes to eat. Want %d\n", eat
);
6726 if (list
->len
<= eat
) {
6727 /* Eaten as whole. */
6732 /* Eaten partially. */
6733 if (skb_shared(list
)) {
6734 clone
= skb_clone(list
, gfp_mask
);
6740 /* This may be pulled without problems. */
6743 if (pskb_carve(list
, eat
, gfp_mask
) < 0) {
6751 /* Free pulled out fragments. */
6752 while ((list
= shinfo
->frag_list
) != insp
) {
6753 shinfo
->frag_list
= list
->next
;
6756 /* And insert new clone at head. */
6759 shinfo
->frag_list
= clone
;
6764 /* carve off first len bytes from skb. Split line (off) is in the
6765 * non-linear part of skb
6767 static int pskb_carve_inside_nonlinear(struct sk_buff
*skb
, const u32 off
,
6768 int pos
, gfp_t gfp_mask
)
6771 unsigned int size
= skb_end_offset(skb
);
6773 const int nfrags
= skb_shinfo(skb
)->nr_frags
;
6774 struct skb_shared_info
*shinfo
;
6776 if (skb_pfmemalloc(skb
))
6777 gfp_mask
|= __GFP_MEMALLOC
;
6779 data
= kmalloc_reserve(&size
, gfp_mask
, NUMA_NO_NODE
, NULL
);
6782 size
= SKB_WITH_OVERHEAD(size
);
6784 memcpy((struct skb_shared_info
*)(data
+ size
),
6785 skb_shinfo(skb
), offsetof(struct skb_shared_info
, frags
[0]));
6786 if (skb_orphan_frags(skb
, gfp_mask
)) {
6787 skb_kfree_head(data
, size
);
6790 shinfo
= (struct skb_shared_info
*)(data
+ size
);
6791 for (i
= 0; i
< nfrags
; i
++) {
6792 int fsize
= skb_frag_size(&skb_shinfo(skb
)->frags
[i
]);
6794 if (pos
+ fsize
> off
) {
6795 shinfo
->frags
[k
] = skb_shinfo(skb
)->frags
[i
];
6799 * We have two variants in this case:
6800 * 1. Move all the frag to the second
6801 * part, if it is possible. F.e.
6802 * this approach is mandatory for TUX,
6803 * where splitting is expensive.
6804 * 2. Split is accurately. We make this.
6806 skb_frag_off_add(&shinfo
->frags
[0], off
- pos
);
6807 skb_frag_size_sub(&shinfo
->frags
[0], off
- pos
);
6809 skb_frag_ref(skb
, i
);
6814 shinfo
->nr_frags
= k
;
6815 if (skb_has_frag_list(skb
))
6816 skb_clone_fraglist(skb
);
6818 /* split line is in frag list */
6819 if (k
== 0 && pskb_carve_frag_list(skb
, shinfo
, off
- pos
, gfp_mask
)) {
6820 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
6821 if (skb_has_frag_list(skb
))
6822 kfree_skb_list(skb_shinfo(skb
)->frag_list
);
6823 skb_kfree_head(data
, size
);
6826 skb_release_data(skb
, SKB_CONSUMED
);
6831 skb_set_end_offset(skb
, size
);
6832 skb_reset_tail_pointer(skb
);
6833 skb_headers_offset_update(skb
, 0);
6838 skb
->data_len
= skb
->len
;
6839 atomic_set(&skb_shinfo(skb
)->dataref
, 1);
6843 /* remove len bytes from the beginning of the skb */
6844 static int pskb_carve(struct sk_buff
*skb
, const u32 len
, gfp_t gfp
)
6846 int headlen
= skb_headlen(skb
);
6849 return pskb_carve_inside_header(skb
, len
, headlen
, gfp
);
6851 return pskb_carve_inside_nonlinear(skb
, len
, headlen
, gfp
);
6854 /* Extract to_copy bytes starting at off from skb, and return this in
6857 struct sk_buff
*pskb_extract(struct sk_buff
*skb
, int off
,
6858 int to_copy
, gfp_t gfp
)
6860 struct sk_buff
*clone
= skb_clone(skb
, gfp
);
6865 if (pskb_carve(clone
, off
, gfp
) < 0 ||
6866 pskb_trim(clone
, to_copy
)) {
6872 EXPORT_SYMBOL(pskb_extract
);
6875 * skb_condense - try to get rid of fragments/frag_list if possible
6878 * Can be used to save memory before skb is added to a busy queue.
6879 * If packet has bytes in frags and enough tail room in skb->head,
6880 * pull all of them, so that we can free the frags right now and adjust
6883 * We do not reallocate skb->head thus can not fail.
6884 * Caller must re-evaluate skb->truesize if needed.
6886 void skb_condense(struct sk_buff
*skb
)
6888 if (skb
->data_len
) {
6889 if (skb
->data_len
> skb
->end
- skb
->tail
||
6890 skb_cloned(skb
) || !skb_frags_readable(skb
))
6893 /* Nice, we can free page frag(s) right now */
6894 __pskb_pull_tail(skb
, skb
->data_len
);
6896 /* At this point, skb->truesize might be over estimated,
6897 * because skb had a fragment, and fragments do not tell
6899 * When we pulled its content into skb->head, fragment
6900 * was freed, but __pskb_pull_tail() could not possibly
6901 * adjust skb->truesize, not knowing the frag truesize.
6903 skb
->truesize
= SKB_TRUESIZE(skb_end_offset(skb
));
6905 EXPORT_SYMBOL(skb_condense
);
6907 #ifdef CONFIG_SKB_EXTENSIONS
6908 static void *skb_ext_get_ptr(struct skb_ext
*ext
, enum skb_ext_id id
)
6910 return (void *)ext
+ (ext
->offset
[id
] * SKB_EXT_ALIGN_VALUE
);
6914 * __skb_ext_alloc - allocate a new skb extensions storage
6916 * @flags: See kmalloc().
6918 * Returns the newly allocated pointer. The pointer can later attached to a
6919 * skb via __skb_ext_set().
6920 * Note: caller must handle the skb_ext as an opaque data.
6922 struct skb_ext
*__skb_ext_alloc(gfp_t flags
)
6924 struct skb_ext
*new = kmem_cache_alloc(skbuff_ext_cache
, flags
);
6927 memset(new->offset
, 0, sizeof(new->offset
));
6928 refcount_set(&new->refcnt
, 1);
6934 static struct skb_ext
*skb_ext_maybe_cow(struct skb_ext
*old
,
6935 unsigned int old_active
)
6937 struct skb_ext
*new;
6939 if (refcount_read(&old
->refcnt
) == 1)
6942 new = kmem_cache_alloc(skbuff_ext_cache
, GFP_ATOMIC
);
6946 memcpy(new, old
, old
->chunks
* SKB_EXT_ALIGN_VALUE
);
6947 refcount_set(&new->refcnt
, 1);
6950 if (old_active
& (1 << SKB_EXT_SEC_PATH
)) {
6951 struct sec_path
*sp
= skb_ext_get_ptr(old
, SKB_EXT_SEC_PATH
);
6954 for (i
= 0; i
< sp
->len
; i
++)
6955 xfrm_state_hold(sp
->xvec
[i
]);
6958 #ifdef CONFIG_MCTP_FLOWS
6959 if (old_active
& (1 << SKB_EXT_MCTP
)) {
6960 struct mctp_flow
*flow
= skb_ext_get_ptr(old
, SKB_EXT_MCTP
);
6963 refcount_inc(&flow
->key
->refs
);
6971 * __skb_ext_set - attach the specified extension storage to this skb
6974 * @ext: extension storage previously allocated via __skb_ext_alloc()
6976 * Existing extensions, if any, are cleared.
6978 * Returns the pointer to the extension.
6980 void *__skb_ext_set(struct sk_buff
*skb
, enum skb_ext_id id
,
6981 struct skb_ext
*ext
)
6983 unsigned int newlen
, newoff
= SKB_EXT_CHUNKSIZEOF(*ext
);
6986 newlen
= newoff
+ skb_ext_type_len
[id
];
6987 ext
->chunks
= newlen
;
6988 ext
->offset
[id
] = newoff
;
6989 skb
->extensions
= ext
;
6990 skb
->active_extensions
= 1 << id
;
6991 return skb_ext_get_ptr(ext
, id
);
6995 * skb_ext_add - allocate space for given extension, COW if needed
6997 * @id: extension to allocate space for
6999 * Allocates enough space for the given extension.
7000 * If the extension is already present, a pointer to that extension
7003 * If the skb was cloned, COW applies and the returned memory can be
7004 * modified without changing the extension space of clones buffers.
7006 * Returns pointer to the extension or NULL on allocation failure.
7008 void *skb_ext_add(struct sk_buff
*skb
, enum skb_ext_id id
)
7010 struct skb_ext
*new, *old
= NULL
;
7011 unsigned int newlen
, newoff
;
7013 if (skb
->active_extensions
) {
7014 old
= skb
->extensions
;
7016 new = skb_ext_maybe_cow(old
, skb
->active_extensions
);
7020 if (__skb_ext_exist(new, id
))
7023 newoff
= new->chunks
;
7025 newoff
= SKB_EXT_CHUNKSIZEOF(*new);
7027 new = __skb_ext_alloc(GFP_ATOMIC
);
7032 newlen
= newoff
+ skb_ext_type_len
[id
];
7033 new->chunks
= newlen
;
7034 new->offset
[id
] = newoff
;
7037 skb
->extensions
= new;
7038 skb
->active_extensions
|= 1 << id
;
7039 return skb_ext_get_ptr(new, id
);
7041 EXPORT_SYMBOL(skb_ext_add
);
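
/* Illustrative sketch (not part of the original file): attaching and reading
 * back an skb extension, using the MPTCP extension id as a stand-in since it
 * is part of the type table above when CONFIG_MPTCP is enabled.
 * example_get_mptcp_ext() is hypothetical.
 */
#if IS_ENABLED(CONFIG_MPTCP)
static __maybe_unused struct mptcp_ext *
example_get_mptcp_ext(struct sk_buff *skb)
{
	struct mptcp_ext *ext;

	ext = skb_ext_find(skb, SKB_EXT_MPTCP);
	if (ext)
		return ext;		/* already present, possibly shared */

	/* COWs the extension area if the skb was cloned. */
	return skb_ext_add(skb, SKB_EXT_MPTCP);
}
#endif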
7044 static void skb_ext_put_sp(struct sec_path
*sp
)
7048 for (i
= 0; i
< sp
->len
; i
++)
7049 xfrm_state_put(sp
->xvec
[i
]);
7053 #ifdef CONFIG_MCTP_FLOWS
7054 static void skb_ext_put_mctp(struct mctp_flow
*flow
)
7057 mctp_key_unref(flow
->key
);
7061 void __skb_ext_del(struct sk_buff
*skb
, enum skb_ext_id id
)
7063 struct skb_ext
*ext
= skb
->extensions
;
7065 skb
->active_extensions
&= ~(1 << id
);
7066 if (skb
->active_extensions
== 0) {
7067 skb
->extensions
= NULL
;
7070 } else if (id
== SKB_EXT_SEC_PATH
&&
7071 refcount_read(&ext
->refcnt
) == 1) {
7072 struct sec_path
*sp
= skb_ext_get_ptr(ext
, SKB_EXT_SEC_PATH
);
7079 EXPORT_SYMBOL(__skb_ext_del
);
7081 void __skb_ext_put(struct skb_ext
*ext
)
7083 /* If this is last clone, nothing can increment
7084 * it after check passes. Avoids one atomic op.
7086 if (refcount_read(&ext
->refcnt
) == 1)
7089 if (!refcount_dec_and_test(&ext
->refcnt
))
7093 if (__skb_ext_exist(ext
, SKB_EXT_SEC_PATH
))
7094 skb_ext_put_sp(skb_ext_get_ptr(ext
, SKB_EXT_SEC_PATH
));
7096 #ifdef CONFIG_MCTP_FLOWS
7097 if (__skb_ext_exist(ext
, SKB_EXT_MCTP
))
7098 skb_ext_put_mctp(skb_ext_get_ptr(ext
, SKB_EXT_MCTP
));
7101 kmem_cache_free(skbuff_ext_cache
, ext
);
7103 EXPORT_SYMBOL(__skb_ext_put
);
7104 #endif /* CONFIG_SKB_EXTENSIONS */
7106 static void kfree_skb_napi_cache(struct sk_buff
*skb
)
7108 /* if SKB is a clone, don't handle this case */
7109 if (skb
->fclone
!= SKB_FCLONE_UNAVAILABLE
) {
7115 __napi_kfree_skb(skb
, SKB_CONSUMED
);
7120 * skb_attempt_defer_free - queue skb for remote freeing
7123 * Put @skb in a per-cpu list, using the cpu which
7124 * allocated the skb/pages to reduce false sharing
7125 * and memory zone spinlock contention.
7127 void skb_attempt_defer_free(struct sk_buff
*skb
)
7129 int cpu
= skb
->alloc_cpu
;
7130 struct softnet_data
*sd
;
7131 unsigned int defer_max
;
7134 if (cpu
== raw_smp_processor_id() ||
7135 WARN_ON_ONCE(cpu
>= nr_cpu_ids
) ||
7137 nodefer
: kfree_skb_napi_cache(skb
);
7141 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb
));
7142 DEBUG_NET_WARN_ON_ONCE(skb
->destructor
);
7144 sd
= &per_cpu(softnet_data
, cpu
);
7145 defer_max
= READ_ONCE(net_hotdata
.sysctl_skb_defer_max
);
7146 if (READ_ONCE(sd
->defer_count
) >= defer_max
)
7149 spin_lock_bh(&sd
->defer_lock
);
7150 /* Send an IPI every time queue reaches half capacity. */
7151 kick
= sd
->defer_count
== (defer_max
>> 1);
7152 /* Paired with the READ_ONCE() few lines above */
7153 WRITE_ONCE(sd
->defer_count
, sd
->defer_count
+ 1);
7155 skb
->next
= sd
->defer_list
;
7156 /* Paired with READ_ONCE() in skb_defer_free_flush() */
7157 WRITE_ONCE(sd
->defer_list
, skb
);
7158 spin_unlock_bh(&sd
->defer_lock
);
7160 /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
7161 * if we are unlucky enough (this seems very unlikely).
7164 kick_defer_list_purge(sd
, cpu
);
7167 static void skb_splice_csum_page(struct sk_buff
*skb
, struct page
*page
,
7168 size_t offset
, size_t len
)
7173 kaddr
= kmap_local_page(page
);
7174 csum
= csum_partial(kaddr
+ offset
, len
, 0);
7175 kunmap_local(kaddr
);
7176 skb
->csum
= csum_block_add(skb
->csum
, csum
, skb
->len
);
7180 * skb_splice_from_iter - Splice (or copy) pages to skbuff
7181 * @skb: The buffer to add pages to
7182 * @iter: Iterator representing the pages to be added
7183 * @maxsize: Maximum amount of pages to be added
7184 * @gfp: Allocation flags
7186 * This is a common helper function for supporting MSG_SPLICE_PAGES. It
7187 * extracts pages from an iterator and adds them to the socket buffer if
7188 * possible, copying them to fragments if not possible (such as if they're slab
7191 * Returns the amount of data spliced/copied or -EMSGSIZE if there's
7192 * insufficient space in the buffer to transfer anything.
7194 ssize_t
skb_splice_from_iter(struct sk_buff
*skb
, struct iov_iter
*iter
,
7195 ssize_t maxsize
, gfp_t gfp
)
7197 size_t frag_limit
= READ_ONCE(net_hotdata
.sysctl_max_skb_frags
);
7198 struct page
*pages
[8], **ppages
= pages
;
7199 ssize_t spliced
= 0, ret
= 0;
7202 while (iter
->count
> 0) {
7203 ssize_t space
, nr
, len
;
7207 space
= frag_limit
- skb_shinfo(skb
)->nr_frags
;
7211 /* We might be able to coalesce without increasing nr_frags */
7212 nr
= clamp_t(size_t, space
, 1, ARRAY_SIZE(pages
));
7214 len
= iov_iter_extract_pages(iter
, &ppages
, maxsize
, nr
, 0, &off
);
7222 struct page
*page
= pages
[i
++];
7223 size_t part
= min_t(size_t, PAGE_SIZE
- off
, len
);
7226 if (WARN_ON_ONCE(!sendpage_ok(page
)))
7229 ret
= skb_append_pagefrags(skb
, page
, off
, part
,
7232 iov_iter_revert(iter
, len
);
7236 if (skb
->ip_summed
== CHECKSUM_NONE
)
7237 skb_splice_csum_page(skb
, page
, off
, part
);
7250 skb_len_add(skb
, spliced
);
7251 return spliced
?: ret
;
7253 EXPORT_SYMBOL(skb_splice_from_iter
);
7255 static __always_inline
7256 size_t memcpy_from_iter_csum(void *iter_from
, size_t progress
,
7257 size_t len
, void *to
, void *priv2
)
7259 __wsum
*csum
= priv2
;
7260 __wsum next
= csum_partial_copy_nocheck(iter_from
, to
+ progress
, len
);
7262 *csum
= csum_block_add(*csum
, next
, progress
);
7266 static __always_inline
7267 size_t copy_from_user_iter_csum(void __user
*iter_from
, size_t progress
,
7268 size_t len
, void *to
, void *priv2
)
7270 __wsum next
, *csum
= priv2
;
7272 next
= csum_and_copy_from_user(iter_from
, to
+ progress
, len
);
7273 *csum
= csum_block_add(*csum
, next
, progress
);
7274 return next
? 0 : len
;
7277 bool csum_and_copy_from_iter_full(void *addr
, size_t bytes
,
7278 __wsum
*csum
, struct iov_iter
*i
)
7282 if (WARN_ON_ONCE(!i
->data_source
))
7284 copied
= iterate_and_advance2(i
, bytes
, addr
, csum
,
7285 copy_from_user_iter_csum
,
7286 memcpy_from_iter_csum
);
7287 if (likely(copied
== bytes
))
7289 iov_iter_revert(i
, copied
);
7292 EXPORT_SYMBOL(csum_and_copy_from_iter_full
);