/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}
static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}
/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}
/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
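/* Usage sketch (illustrative, not part of the original file): callers
 * normally reach this through the alloc_skb() wrapper. The 256-byte
 * size below is an assumption.
 *
 *	struct sk_buff *skb = alloc_skb(256, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	(skb now has 256 bytes of tailroom, no headroom, refcount one)
 */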
/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc()
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contains data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	return skb;
}
/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
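/* Usage sketch (illustrative assumption, not from this file): an RX
 * driver pre-sizes its buffer so the skb_shared_info fits after the
 * frame, DMAs the frame in, then wraps it:
 *
 *	buflen = SKB_DATA_ALIGN(NET_SKB_PAD + frame_len) +
 *		 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	(NIC writes the frame at data + NET_SKB_PAD, then:)
 *	skb = build_skb(data, buflen);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *	}
 */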
#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	void *data;

	local_irq_save(flags);
	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, fragsz, gfp_mask);
	local_irq_restore(flags);
	return data;
}
/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(netdev_alloc_frag);
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	local_irq_save(flags);

	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, len, gfp_mask);
	pfmemalloc = nc->pfmemalloc;

	local_irq_restore(flags);

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
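/* Usage sketch (illustrative): a simple RX path without NAPI-local
 * caches; pkt_len and rx_buf are assumptions standing in for driver
 * state.
 *
 *	skb = __netdev_alloc_skb(dev, pkt_len, GFP_ATOMIC);
 *	if (unlikely(!skb))
 *		return NULL;
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 */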
/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
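/* Usage sketch (illustrative): attach a received page fragment as
 * frag 0; frag_len and the half-page truesize are assumptions.
 *
 *	skb_add_rx_frag(skb, 0, page, 0, frag_len, PAGE_SIZE / 2);
 *	(skb->len, skb->data_len and skb->truesize all grow accordingly)
 */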
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}
static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}
static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_zcopy_clear(skb, true);
	skb_free_head(skb);
}
/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}
void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
#endif
}
/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}
/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);
void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);
/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero
 *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
 *	is being dropped after a failure and notes that
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
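/* The two free paths differ only in the tracepoint they fire, which
 * drop-monitoring tools key off. A rough rule of thumb:
 *
 *	kfree_skb(skb);		packet was dropped (error path)
 *	consume_skb(skb);	packet completed normally (e.g. TX done)
 */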
/**
 *	consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Alike consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have been already dropped
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}
void __kfree_skb_flush(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* flush skb_cache if containing objects */
	if (nc->skb_count) {
		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
static inline void _kfree_skb_defer(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);

	/* record skb to CPU local list */
	nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
	/* SLUB writes into objects when freeing */
	prefetchw(skb);
#endif

	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
void __kfree_skb_defer(struct sk_buff *skb)
{
	_kfree_skb_defer(skb);
}
void napi_consume_skb(struct sk_buff *skb, int budget)
{
	if (unlikely(!skb))
		return;

	/* Zero budget indicate non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	_kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	/* We do not copy old->sk */
	new->dev		= old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
	new->sp			= secpath_get(old->sp);
#endif
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}
/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}
/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	user = mmp->user ? : current_user();

	do {
		old_pg = atomic_long_read(&user->locked_vm);
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
		 old_pg);

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	if (!sock_flag(sk, SOCK_ZEROCOPY))
		return NULL;

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->callback = sock_zerocopy_callback;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	refcount_set(&uarg->refcnt, 1);
	sock_hold(sk);

	return uarg;
}
EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);
static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg)
{
	if (uarg) {
		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
		u32 bytelen, next;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		bytelen = uarg->bytelen + size;
		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg->id + uarg->len) == next) {
			if (mm_account_pinned_pages(&uarg->mmp, size))
				return NULL;
			uarg->len++;
			uarg->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);
			sock_zerocopy_get(uarg);
			return uarg;
		}
	}

new_alloc:
	return sock_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);
static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}
void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!success)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk->sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
void sock_zerocopy_put(struct ubuf_info *uarg)
{
	if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
		if (uarg->callback)
			uarg->callback(uarg, uarg->zerocopy);
		else
			consume_skb(skb_from_uarg(uarg));
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put);
void sock_zerocopy_put_abort(struct ubuf_info *uarg)
{
	if (uarg) {
		struct sock *sk = skb_from_uarg(uarg)->sk;

		atomic_dec(&sk->sk_zckey);
		uarg->len--;

		sock_zerocopy_put(uarg);
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
				   struct iov_iter *from, size_t length);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	struct ubuf_info *orig_uarg = skb_zcopy(skb);
	struct iov_iter orig_iter = msg->msg_iter;
	int err, orig_len = skb->len;

	/* An skb can only point to one uarg. This edge case happens when
	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
	 */
	if (orig_uarg && uarg != orig_uarg)
		return -EEXIST;

	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		msg->msg_iter = orig_iter;
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	skb_zcopy_set(skb, uarg);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
			      gfp_t gfp_mask)
{
	if (skb_zcopy(orig)) {
		if (skb_zcopy(nskb)) {
			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
			if (!gfp_mask) {
				WARN_ON_ONCE(1);
				return -ENOMEM;
			}
			if (skb_uarg(nskb) == skb_uarg(orig))
				return 0;
			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
				return -EIO;
		}
		skb_zcopy_set(nskb, skb_uarg(orig));
	}
	return 0;
}
/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	int i, new_frags;
	u32 d_off;

	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
		return -EINVAL;

	if (!num_frags)
		goto release;

	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < new_frags; i++) {
		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	page = head;
	d_off = 0;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
				      p, p_off, p_len, copied) {
			u32 copy, done = 0;
			vaddr = kmap_atomic(p);

			while (done < p_len) {
				if (d_off == PAGE_SIZE) {
					d_off = 0;
					page = (struct page *)page_private(page);
				}
				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
				memcpy(page_address(page) + d_off,
				       vaddr + p_off + done, copy);
				done += copy;
				d_off += copy;
			}
			kunmap_atomic(vaddr);
		}
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	/* skb frags point to kernel buffers */
	for (i = 0; i < new_frags - 1; i++) {
		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
		head = (struct page *)page_private(head);
	}
	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
	skb_shinfo(skb)->nr_frags = new_frags;

release:
	skb_zcopy_clear(skb, false);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);
/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    refcount_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		refcount_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
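/* Usage sketch (illustrative): a clone shares the data area with the
 * original, so neither copy may write the payload without unsharing
 * first (via pskb_expand_head(), skb_copy() or pskb_copy()):
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (clone && skb_cloned(clone))
 *		(data still shared; unshare before modifying it)
 */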
static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
EXPORT_SYMBOL(skb_copy_header);
static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}
/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));

	skb_copy_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
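/* Usage sketch (illustrative): unlike skb_clone(), this yields a fully
 * private, linearized buffer that is safe to modify throughout:
 *
 *	struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);
 *
 *	if (copy)
 *		memset(copy->data, 0, copy->len);  (safe: nothing shared)
 */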
/**
 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask) ||
		    skb_zerocopy_clone(n, skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	skb_copy_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i, osize = skb_end_offset(skb);
	int size = osize + nhead + ntail;
	long off;
	u8 *data;

	BUG_ON(nhead < 0);

	BUG_ON(skb_shared(skb));

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		if (skb_zcopy(skb))
			refcount_inc(&skb_uarg(skb)->refcnt);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->head_frag = 0;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	skb->tail	      += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	skb_metadata_clear(skb);

	/* It is not generally safe to change skb->truesize.
	 * For the moment, we really care of rx path, or
	 * when skb is orphaned (not attached to a socket).
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;

	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
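/* Usage sketch (illustrative): growing headroom before pushing a new
 * header; the 16-byte figure is an assumption. All cached pointers
 * into the old head are stale after a successful call.
 *
 *	if (skb_headroom(skb) < 16 &&
 *	    pskb_expand_head(skb, 16 - skb_headroom(skb), 0, GFP_ATOMIC))
 *		goto drop;
 *	hdr = skb_push(skb, 16);
 */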
/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			     skb->len + head_copy_len));

	skb_copy_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);
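/* Usage sketch (illustrative): copying while growing both ends, e.g.
 * for an extra encapsulation header and a trailer; the sizes are
 * assumptions.
 *
 *	struct sk_buff *n = skb_copy_expand(skb, 32, 16, GFP_ATOMIC);
 *
 *	if (!n)
 *		return -ENOMEM;
 *	(n has 32 bytes of headroom and 16 bytes of tailroom to spare)
 */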
/**
 *	__skb_pad		-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *	@free_on_error: free buffer on error
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error
 *	if @free_on_error is true.
 */
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	if (free_on_error)
		kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(__skb_pad);
/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
void *skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);
/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
void *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);
/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
void *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);
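/* Worked sketch of the pointer arithmetic (illustrative values): on a
 * linear buffer with sufficient head- and tailroom,
 *
 *	skb_put(skb, 100);	tail += 100, len += 100  (append payload)
 *	skb_push(skb, 14);	data -= 14,  len += 14   (prepend header)
 *	skb_pull(skb, 14);	data += 14,  len -= 14   (strip it again)
 *
 * skb_put()/skb_push() panic on overrun; skb_pull() returns the new
 * skb->data.
 */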
/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		skb->len       = len;
		skb->data_len  = 0;
		skb_set_tail_pointer(skb, len);
	}

	if (!skb->sk || skb->destructor == sock_edemux)
		skb_condense(skb);
	return 0;
}
EXPORT_SYMBOL(___pskb_trim);
/* Note : use pskb_trim_rcsum() instead of calling this directly
 */
int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		int delta = skb->len - len;

		skb->csum = csum_sub(skb->csum,
				     skb_checksum(skb, len, delta, 0));
	}
	return __pskb_trim(skb, len);
}
EXPORT_SYMBOL(pskb_trim_rcsum_slow);
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes a sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
void *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
			     skb_tail_pointer(skb), delta));

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need update frag list, we are in troubles.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be very rare operation, it is worth to fight against
	 * further bloating skb head and crucify ourselves here instead.
	 * Pure masohism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail     += delta;
	skb->data_len -= delta;

	if (!skb->data_len)
		skb_zcopy_clear(skb, false);

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);
/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u32 p_off, p_len, copied;
			struct page *p;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(f,
					      f->page_offset + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				memcpy(to + copied, vaddr + p_off, p_len);
				kunmap_atomic(vaddr);
			}

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
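/* Usage sketch (illustrative): flattening a possibly-fragmented skb
 * into a caller-supplied buffer; buf[] is an assumption. A negative
 * offset reaches back into the headroom, as skb_copy() above uses to
 * copy the already-built headers.
 *
 *	u8 buf[64];
 *
 *	if (skb_copy_bits(skb, 0, buf, min_t(int, skb->len, sizeof(buf))))
 *		return -EFAULT;
 */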
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}
*linear_to_page(struct page
*page
, unsigned int *len
,
2109 unsigned int *offset
,
2112 struct page_frag
*pfrag
= sk_page_frag(sk
);
2114 if (!sk_page_frag_refill(sk
, pfrag
))
2117 *len
= min_t(unsigned int, *len
, pfrag
->size
- pfrag
->offset
);
2119 memcpy(page_address(pfrag
->page
) + pfrag
->offset
,
2120 page_address(page
) + *offset
, *len
);
2121 *offset
= pfrag
->offset
;
2122 pfrag
->offset
+= *len
;
static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
			     struct page *page,
			     unsigned int offset)
{
	return	spd->nr_pages &&
		spd->pages[spd->nr_pages - 1] == page &&
		(spd->partial[spd->nr_pages - 1].offset +
		 spd->partial[spd->nr_pages - 1].len == offset);
}
/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static bool spd_fill_page(struct splice_pipe_desc *spd,
			  struct pipe_inode_info *pipe, struct page *page,
			  unsigned int *len, unsigned int offset,
			  bool linear,
			  struct sock *sk)
{
	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
		return true;

	if (linear) {
		page = linear_to_page(page, len, &offset, sk);
		if (!page)
			return true;
	}
	if (spd_can_coalesce(spd, page, offset)) {
		spd->partial[spd->nr_pages - 1].len += *len;
		return false;
	}
	get_page(page);
	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return false;
}
static bool __splice_segment(struct page *page, unsigned int poff,
			     unsigned int plen, unsigned int *off,
			     unsigned int *len,
			     struct splice_pipe_desc *spd, bool linear,
			     struct sock *sk,
			     struct pipe_inode_info *pipe)
{
	if (!*len)
		return true;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return false;
	}

	/* ignore any bits we already processed */
	poff += *off;
	plen -= *off;
	*off = 0;

	do {
		unsigned int flen = min(*len, plen);

		if (spd_fill_page(spd, pipe, page, &flen, poff,
				  linear, sk))
			return true;
		poff += flen;
		plen -= flen;
		*len -= flen;
	} while (*len && plen);

	return false;
}
/*
 * Map linear and fragment data from the skb to spd. It reports true if the
 * pipe is full or if we already spliced the requested length.
 */
static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			      unsigned int *offset, unsigned int *len,
			      struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;
	struct sk_buff *iter;

	/* map the linear part :
	 * If skb->head_frag is set, this 'linear' part is backed by a
	 * fragment, and if the head is not shared with any clones then
	 * we can avoid a copy since we own the head portion of this page.
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, spd,
			     skb_head_is_locked(skb),
			     sk, pipe))
		return true;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(skb_frag_page(f),
				     f->page_offset, skb_frag_size(f),
				     offset, len, spd, false, sk, pipe))
			return true;
	}

	skb_walk_frags(skb, iter) {
		if (*offset >= iter->len) {
			*offset -= iter->len;
			continue;
		}
		/* __skb_splice_bits() only fails if the output has no room
		 * left, so no point in going over the frag_list for the error
		 * case.
		 */
		if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
			return true;
	}

	return false;
}
/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list.
 */
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[MAX_SKB_FRAGS];
	struct page *pages[MAX_SKB_FRAGS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = MAX_SKB_FRAGS,
		.ops = &nosteal_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	int ret = 0;

	__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);

	if (spd.nr_pages)
		ret = splice_to_pipe(pipe, &spd);

	return ret;
}
EXPORT_SYMBOL_GPL(skb_splice_bits);
/* Send skb data on a socket. Socket must be locked. */
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len)
{
	unsigned int orig_len = len;
	struct sk_buff *head = skb;
	unsigned short fragidx;
	int slen, ret;

do_frag_list:

	/* Deal with head data */
	while (offset < skb_headlen(skb) && len) {
		struct kvec kv;
		struct msghdr msg;

		slen = min_t(int, len, skb_headlen(skb) - offset);
		kv.iov_base = skb->data + offset;
		kv.iov_len = slen;
		memset(&msg, 0, sizeof(msg));

		ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
		if (ret <= 0)
			goto error;

		offset += ret;
		len -= ret;
	}

	/* All the data was skb head? */
	if (!len)
		goto out;

	/* Make offset relative to start of frags */
	offset -= skb_headlen(skb);

	/* Find where we are in frag list */
	for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];

		if (offset < frag->size)
			break;

		offset -= frag->size;
	}

	for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];

		slen = min_t(size_t, len, frag->size - offset);

		while (slen) {
			ret = kernel_sendpage_locked(sk, frag->page.p,
						     frag->page_offset + offset,
						     slen, MSG_DONTWAIT);
			if (ret <= 0)
				goto error;

			len -= ret;
			offset += ret;
			slen -= ret;
		}

		offset = 0;
	}

	if (len) {
		/* Process any frag lists */

		if (skb == head) {
			if (skb_has_frag_list(skb)) {
				skb = skb_shinfo(skb)->frag_list;
				goto do_frag_list;
			}
		} else if (skb->next) {
			skb = skb->next;
			goto do_frag_list;
		}
	}

out:
	return orig_len - len;

error:
	return orig_len == len ? ret : orig_len - len;
}
EXPORT_SYMBOL_GPL(skb_send_sock_locked);
/* Send skb data on a socket. */
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
{
	int ret = 0;

	lock_sock(sk);
	ret = skb_send_sock_locked(sk, skb, offset, len);
	release_sock(sk);

	return ret;
}
EXPORT_SYMBOL_GPL(skb_send_sock);
/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u32 p_off, p_len, copied;
			struct page *p;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(frag,
					      frag->page_offset + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				memcpy(vaddr + p_off, from + copied, p_len);
				kunmap_atomic(vaddr);
			}

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);
/* Checksum skb data. */
__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = ops->update(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u32 p_off, p_len, copied;
			struct page *p;
			__wsum csum2;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(frag,
					      frag->page_offset + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				csum2 = ops->update(vaddr + p_off, p_len, 0);
				kunmap_atomic(vaddr);
				csum = ops->combine(csum, csum2, pos, p_len);
				pos += p_len;
			}

			if (!(len -= copy))
				return csum;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = __skb_checksum(frag_iter, offset - start,
					       copy, 0, ops);
			csum = ops->combine(csum, csum2, pos, copy);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(__skb_checksum);
__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	const struct skb_checksum_ops ops = {
		.update  = csum_partial_ext,
		.combine = csum_block_add_ext,
	};

	return __skb_checksum(skb, offset, len, csum, &ops);
}
EXPORT_SYMBOL(skb_checksum);
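
/* Illustrative sketch (editor's addition): computing the Internet checksum
 * over an entire skb with skb_checksum() and folding the 32-bit partial
 * sum down to the final 16-bit value. The helper name is hypothetical.
 */
static __sum16 __maybe_unused example_full_csum(const struct sk_buff *skb)
{
	__wsum csum = skb_checksum(skb, 0, skb->len, 0);

	return csum_fold(csum);
}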
/* Both of above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
				    u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to     += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			u32 p_off, p_len, copied;
			struct page *p;
			__wsum csum2;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(frag,
					      frag->page_offset + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				csum2 = csum_partial_copy_nocheck(vaddr + p_off,
								  to + copied,
								  p_len, 0);
				kunmap_atomic(vaddr);
				csum = csum_block_add(csum, csum2, pos);
				pos += p_len;
			}

			if (!(len -= copy))
				return csum;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to     += copy;
			pos    += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);
static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
{
	net_warn_ratelimited(
		"%s: attempt to compute crc32c without libcrc32c.ko\n",
		__func__);
	return 0;
}

static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
				       int offset, int len)
{
	net_warn_ratelimited(
		"%s: attempt to compute crc32c without libcrc32c.ko\n",
		__func__);
	return 0;
}

static const struct skb_checksum_ops default_crc32c_ops = {
	.update  = warn_crc32c_csum_update,
	.combine = warn_crc32c_csum_combine,
};

const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
	&default_crc32c_ops;
EXPORT_SYMBOL(crc32c_csum_stub);
/**
 *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
 *	@from: source buffer
 *
 *	Calculates the amount of linear headroom needed in the 'to' skb passed
 *	into skb_zerocopy().
 */
unsigned int
skb_zerocopy_headlen(const struct sk_buff *from)
{
	unsigned int hlen = 0;

	if (!from->head_frag ||
	    skb_headlen(from) < L1_CACHE_BYTES ||
	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
		hlen = skb_headlen(from);

	if (skb_has_frag_list(from))
		hlen = from->len;

	return hlen;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
/**
 *	skb_zerocopy - Zero copy skb to skb
 *	@to: destination buffer
 *	@from: source buffer
 *	@len: number of bytes to copy from source buffer
 *	@hlen: size of linear headroom in destination buffer
 *
 *	Copies up to `len` bytes from `from` to `to` by creating references
 *	to the frags in the source buffer.
 *
 *	The `hlen` as calculated by skb_zerocopy_headlen() specifies the
 *	headroom in the `to` buffer.
 *
 *	Return value:
 *	0: everything is OK
 *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
 *	-EFAULT: skb_copy_bits() found some problem with skb geometry
 */
int
skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
	int i, j = 0;
	int plen = 0; /* length of skb->head fragment */
	int ret;
	struct page *page;
	unsigned int offset;

	BUG_ON(!from->head_frag && !hlen);

	/* dont bother with small payloads */
	if (len <= skb_tailroom(to))
		return skb_copy_bits(from, 0, skb_put(to, len), len);

	if (hlen) {
		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
		if (unlikely(ret))
			return ret;
		len -= hlen;
	} else {
		plen = min_t(int, skb_headlen(from), len);
		if (plen) {
			page = virt_to_head_page(from->head);
			offset = from->data - (unsigned char *)page_address(page);
			__skb_fill_page_desc(to, 0, page, offset, plen);
			get_page(page);
			j = 1;
			len -= plen;
		}
	}

	to->truesize += len + plen;
	to->len += len + plen;
	to->data_len += len + plen;

	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
		skb_tx_error(from);
		return -ENOMEM;
	}
	skb_zerocopy_clone(to, from, GFP_ATOMIC);

	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
		if (!len)
			break;
		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
		len -= skb_shinfo(to)->frags[j].size;
		skb_frag_ref(to, j);
		j++;
	}
	skb_shinfo(to)->nr_frags = j;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_zerocopy);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);
/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 *	skb_rbtree_purge - empty a skb rbtree
 *	@root: root of the rbtree to empty
 *
 *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
 *	the list and one reference dropped. This function does not take
 *	any lock. Synchronization should be handled by the caller (e.g., TCP
 *	out-of-order queue is protected by the socket lock).
 */
void skb_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p = rb_first(root);

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		kfree_skb(skb);
	}
}
/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);
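
/* Illustrative sketch (editor's addition): a minimal producer/consumer pair
 * over an sk_buff_head. skb_queue_tail()/skb_dequeue() take the list lock
 * with IRQs disabled, so the queue may be shared with interrupt context.
 * example_rxq and both helpers are hypothetical; the queue must be set up
 * once with skb_queue_head_init(&example_rxq) before use.
 */
static struct sk_buff_head example_rxq;

static void __maybe_unused example_produce(struct sk_buff *skb)
{
	skb_queue_tail(&example_rxq, skb);	/* FIFO order */
}

static void __maybe_unused example_consume_all(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&example_rxq)) != NULL)
		kfree_skb(skb);		/* a real consumer would process it */
}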
/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);

/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);
static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags  = 0;
	skb1->data_len		   = skb->data_len;
	skb1->len		   += skb1->data_len;
	skb->data_len		   = 0;
	skb->len		   = len;
	skb_set_tail_pointer(skb, len);
}
static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len		  = skb1->data_len = skb->len - len;
	skb->len		  = len;
	skb->data_len		  = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split is accurately. We make this.
				 */
				skb_frag_ref(skb, i);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}
/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
				      SKBTX_SHARED_FRAG;
	skb_zerocopy_clone(skb1, skb, 0);
	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
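
/* Illustrative sketch (editor's addition): splitting an skb at a byte
 * count, roughly the way TCP's segmentation code uses skb_split(). The
 * second buffer must have enough tailroom for any linear bytes that move;
 * allocating skb_headlen(skb) covers the worst case. Helper is hypothetical.
 */
static int __maybe_unused example_split(struct sk_buff *skb, u32 len)
{
	struct sk_buff *skb1;

	if (len >= skb->len)
		return -EINVAL;

	skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
	if (!skb1)
		return -ENOMEM;

	skb_split(skb, skb1, len);	/* skb keeps [0, len), skb1 the rest */
	kfree_skb(skb1);		/* a real caller would transmit skb1 */
	return 0;
}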
/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes from
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to shiftlen worth of bytes, which may be less than
 * the length of the skb, from skb to tgt. Returns number bytes shifted.
 * It's up to caller to free skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * Skb cannot include anything else but paged data while tgt is allowed
 * to have non-paged data as well.
 *
 * TODO: full sized shift could be optimized but that would need
 * specialized skb free'er to handle frags without up-to-date nr_frags.
 */
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);

	if (skb_headlen(skb))
		return 0;
	if (skb_zcopy(tgt) || skb_zcopy(skb))
		return 0;

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
			      fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= skb_frag_size(fragfrom);
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale! */
			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			skb_frag_size_add(fragto, shiftlen);
			skb_frag_size_sub(fragfrom, shiftlen);
			fragfrom->page_offset += shiftlen;

			goto onlymerged;
		}

		from++;
	}

	/* Skip full, not-fitting skb to avoid expensive operations */
	if ((shiftlen == skb->len) &&
	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
		return 0;

	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
		return 0;

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		if (to == MAX_SKB_FRAGS)
			return 0;

		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= skb_frag_size(fragfrom)) {
			*fragto = *fragfrom;
			todo -= skb_frag_size(fragfrom);
			from++;
			to++;

		} else {
			__skb_frag_ref(fragfrom);
			fragto->page = fragfrom->page;
			fragto->page_offset = fragfrom->page_offset;
			skb_frag_size_set(fragto, todo);

			fragfrom->page_offset += todo;
			skb_frag_size_sub(fragfrom, todo);
			todo = 0;

			to++;
			break;
		}
	}

	/* Ready to "commit" this state change to tgt */
	skb_shinfo(tgt)->nr_frags = to;

	if (merge >= 0) {
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
		__skb_frag_unref(fragfrom);
	}

	/* Reposition in the original skb */
	to = 0;
	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
	/* Most likely the tgt won't ever need its checksum anymore, skb on
	 * the other hand might need it if it needs to be resent
	 */
	tgt->ip_summed = CHECKSUM_PARTIAL;
	skb->ip_summed = CHECKSUM_PARTIAL;

	/* Yak, is it really working this way? Some helper please? */
	skb->len -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	return shiftlen;
}
/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}
EXPORT_SYMBOL(skb_prepare_seq_read);
/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note 1: The size of each block of data returned can be arbitrary,
 *       this limitation is the cost for zerocopy sequential
 *       reads of potentially non linear data.
 *
 * Note 2: Fragment lists within fragments are not implemented
 *       at the moment, state->root_skb could be replaced with
 *       a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset)) {
		if (st->frag_data) {
			kunmap_atomic(st->frag_data);
			st->frag_data = NULL;
		}
		return 0;
	}

next_skb:
	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

	if (abs_offset < block_limit && !st->frag_data) {
		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = skb_frag_size(frag) + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_atomic(skb_frag_page(frag));

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_atomic(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += skb_frag_size(frag);
	}

	if (st->frag_data) {
		kunmap_atomic(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	}

	return 0;
}
EXPORT_SYMBOL(skb_seq_read);
/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if skb_seq_read() was not called until it
 * returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_atomic(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);
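
/* Illustrative sketch (editor's addition): walking an skb's bytes with the
 * sequential-read API above. Each skb_seq_read() call yields a pointer to
 * the next contiguous block; this hypothetical helper just totals the
 * bytes seen, so the read runs to completion and no abort is needed.
 */
static unsigned int __maybe_unused example_count_bytes(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0)
		consumed += len;	/* 'data' points at 'len' valid bytes */

	return consumed;
}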
#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}
/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config)
{
	struct ts_state state;
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));

	ret = textsearch_find(config, &state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
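
/* Illustrative sketch (editor's addition): searching a packet for a byte
 * pattern, mirroring how the netfilter string match drives skb_find_text().
 * The algorithm name ("bm", Boyer-Moore) and the helper are just examples.
 */
static bool __maybe_unused example_contains(struct sk_buff *skb,
					    const char *pattern)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("bm", pattern, strlen(pattern),
				  GFP_ATOMIC, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);
	return pos != UINT_MAX;
}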
/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data.
 * @getfrag: call back function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data in the fragment part
 * of the skb. If any page alloc fails, this procedure returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int (*getfrag)(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			void *from, int length)
{
	int frg_cnt = skb_shinfo(skb)->nr_frags;
	int copy;
	int offset = 0;
	int ret;
	struct page_frag *pfrag = &current->task_frag;

	do {
		/* Return error if we don't have space for new frag */
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EMSGSIZE;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		/* copy the user data to page */
		copy = min_t(int, length, pfrag->size - pfrag->offset);

		ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
				   copy);
		frg_cnt++;
		pfrag->offset += copy;
		get_page(pfrag->page);

		skb->truesize += copy;
		refcount_add(copy, &sk->sk_wmem_alloc);
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);
int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (skb_can_coalesce(skb, i, page, offset)) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
	} else if (i < MAX_SKB_FRAGS) {
		get_page(page);
		skb_fill_page_desc(skb, i, page, offset, size);
	} else {
		return -EMSGSIZE;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(skb_append_pagefrags);
/**
 *	skb_pull_rcsum - pull skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_pull on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_pull unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	unsigned char *data = skb->data;

	BUG_ON(len > skb->len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, data, len);
	return skb->data;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
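
/* Illustrative sketch (editor's addition): stripping a fixed-size header on
 * the receive path while keeping CHECKSUM_COMPLETE state valid.
 * EXAMPLE_HDR_LEN is a made-up constant for a hypothetical encapsulation
 * header; unlike a bare __skb_pull(), skb_pull_rcsum() also folds the
 * pulled bytes out of skb->csum when ip_summed == CHECKSUM_COMPLETE.
 */
#define EXAMPLE_HDR_LEN 8

static int __maybe_unused example_decap(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, EXAMPLE_HDR_LEN))
		return -EINVAL;

	skb_pull_rcsum(skb, EXAMPLE_HDR_LEN);
	return 0;
}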
static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
{
	skb_frag_t head_frag;
	struct page *page;

	page = virt_to_head_page(frag_skb->head);
	head_frag.page.p = page;
	head_frag.page_offset = frag_skb->data -
		(unsigned char *)page_address(page);
	head_frag.size = skb_headlen(frag_skb);
	return head_frag;
}
/**
 *	skb_segment - Perform protocol segmentation on skb.
 *	@head_skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function performs segmentation on the given skb.  It returns
 *	a pointer to the first in a list of new skbs for the segments.
 *	In case of error it returns ERR_PTR(err).
 */
struct sk_buff *skb_segment(struct sk_buff *head_skb,
			    netdev_features_t features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
	unsigned int mss = skb_shinfo(head_skb)->gso_size;
	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
	struct sk_buff *frag_skb = head_skb;
	unsigned int offset = doffset;
	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
	unsigned int partial_segs = 0;
	unsigned int headroom;
	unsigned int len = head_skb->len;
	__be16 proto;
	bool csum, sg;
	int nfrags = skb_shinfo(head_skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;
	int dummy;

	__skb_push(head_skb, doffset);
	proto = skb_network_protocol(head_skb, &dummy);
	if (unlikely(!proto))
		return ERR_PTR(-EINVAL);

	sg = !!(features & NETIF_F_SG);
	csum = !!can_checksum_protocol(features, proto);

	if (sg && csum && (mss != GSO_BY_FRAGS)) {
		if (!(features & NETIF_F_GSO_PARTIAL)) {
			struct sk_buff *iter;
			unsigned int frag_len;

			if (!list_skb ||
			    !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
				goto normal;

			/* If we get here then all the required
			 * GSO features except frag_list are supported.
			 * Try to split the SKB to multiple GSO SKBs
			 * with no frag_list.
			 * Currently we can do that only when the buffers don't
			 * have a linear part and all the buffers except
			 * the last are of the same length.
			 */
			frag_len = list_skb->len;
			skb_walk_frags(head_skb, iter) {
				if (frag_len != iter->len && iter->next)
					goto normal;
				if (skb_headlen(iter) && !iter->head_frag)
					goto normal;

				len -= iter->len;
			}

			if (len != frag_len)
				goto normal;
		}

		/* GSO partial only requires that we trim off any excess that
		 * doesn't fit into an MSS sized block, so take care of that
		 * now.
		 */
		partial_segs = len / mss;
		if (partial_segs > 1)
			mss *= partial_segs;
		else
			partial_segs = 0;
	}

normal:
	headroom = skb_headroom(head_skb);
	pos = skb_headlen(head_skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *nskb_frag;
		int hsize;
		int size;

		if (unlikely(mss == GSO_BY_FRAGS)) {
			len = list_skb->len;
		} else {
			len = head_skb->len - offset;
			if (len > mss)
				len = mss;
		}

		hsize = skb_headlen(head_skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

		if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
		    (skb_headlen(list_skb) == len || sg)) {
			BUG_ON(skb_headlen(list_skb) > len);

			i = 0;
			nfrags = skb_shinfo(list_skb)->nr_frags;
			frag = skb_shinfo(list_skb)->frags;
			frag_skb = list_skb;
			pos += skb_headlen(list_skb);

			while (pos < offset + len) {
				BUG_ON(i >= nfrags);

				size = skb_frag_size(frag);
				if (pos + size > offset + len)
					break;

				i++;
				pos += size;
				frag++;
			}

			nskb = skb_clone(list_skb, GFP_ATOMIC);
			list_skb = list_skb->next;

			if (unlikely(!nskb))
				goto err;

			if (unlikely(pskb_trim(nskb, len))) {
				kfree_skb(nskb);
				goto err;
			}

			hsize = skb_end_offset(nskb);
			if (skb_cow_head(nskb, doffset + headroom)) {
				kfree_skb(nskb);
				goto err;
			}

			nskb->truesize += skb_end_offset(nskb) - hsize;
			skb_release_head_state(nskb);
			__skb_push(nskb, doffset);
		} else {
			nskb = __alloc_skb(hsize + doffset + headroom,
					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
					   NUMA_NO_NODE);

			if (unlikely(!nskb))
				goto err;

			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);
		}

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		__copy_skb_header(nskb, head_skb);

		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
		skb_reset_mac_len(nskb);

		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
						 nskb->data - tnl_hlen,
						 doffset + tnl_hlen);

		if (nskb->len == len + doffset)
			goto perform_csum_check;

		if (!sg) {
			if (!nskb->remcsum_offload)
				nskb->ip_summed = CHECKSUM_NONE;
			SKB_GSO_CB(nskb)->csum =
				skb_copy_and_csum_bits(head_skb, offset,
						       skb_put(nskb, len),
						       len, 0);
			SKB_GSO_CB(nskb)->csum_start =
				skb_headroom(nskb) + doffset;
			continue;
		}

		nskb_frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(head_skb, offset,
						 skb_put(nskb, hsize), hsize);

		skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
					      SKBTX_SHARED_FRAG;

		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
			goto err;

		while (pos < offset + len) {
			if (i >= nfrags) {
				i = 0;
				nfrags = skb_shinfo(list_skb)->nr_frags;
				frag = skb_shinfo(list_skb)->frags;
				frag_skb = list_skb;
				if (!skb_headlen(list_skb)) {
					BUG_ON(!nfrags);
				} else {
					BUG_ON(!list_skb->head_frag);

					/* to make room for head_frag. */
					i--;
					frag--;
				}
				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
				    skb_zerocopy_clone(nskb, frag_skb,
						       GFP_ATOMIC))
					goto err;

				list_skb = list_skb->next;
			}

			if (unlikely(skb_shinfo(nskb)->nr_frags >=
				     MAX_SKB_FRAGS)) {
				net_warn_ratelimited(
					"skb_segment: too many frags: %u %u\n",
					pos, mss);
				err = -EINVAL;
				goto err;
			}

			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
			__skb_frag_ref(nskb_frag);
			size = skb_frag_size(nskb_frag);

			if (pos < offset) {
				nskb_frag->page_offset += offset - pos;
				skb_frag_size_sub(nskb_frag, offset - pos);
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				frag++;
				pos += size;
			} else {
				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
				goto skip_fraglist;
			}

			nskb_frag++;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;

perform_csum_check:
		if (!csum) {
			if (skb_has_shared_frag(nskb)) {
				err = __skb_linearize(nskb);
				if (err)
					goto err;
			}
			if (!nskb->remcsum_offload)
				nskb->ip_summed = CHECKSUM_NONE;
			SKB_GSO_CB(nskb)->csum =
				skb_checksum(nskb, doffset,
					     nskb->len - doffset, 0);
			SKB_GSO_CB(nskb)->csum_start =
				skb_headroom(nskb) + doffset;
		}
	} while ((offset += len) < head_skb->len);

	/* Some callers want to get the end of the list.
	 * Put it in segs->prev to avoid walking the list.
	 * (see validate_xmit_skb_list() for example)
	 */
	segs->prev = tail;

	if (partial_segs) {
		struct sk_buff *iter;
		int type = skb_shinfo(head_skb)->gso_type;
		unsigned short gso_size = skb_shinfo(head_skb)->gso_size;

		/* Update type to add partial and then remove dodgy if set */
		type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
		type &= ~SKB_GSO_DODGY;

		/* Update GSO info and prepare to start updating headers on
		 * our way back down the stack of protocols.
		 */
		for (iter = segs; iter; iter = iter->next) {
			skb_shinfo(iter)->gso_size = gso_size;
			skb_shinfo(iter)->gso_segs = partial_segs;
			skb_shinfo(iter)->gso_type = type;
			SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
		}

		if (tail->len - doffset <= gso_size)
			skb_shinfo(tail)->gso_size = 0;
		else if (tail != segs)
			skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
	}

	/* Following permits correct backpressure, for protocols
	 * using skb_set_owner_w().
	 * Idea is to transfer ownership from head_skb to last segment.
	 */
	if (head_skb->destructor == sock_wfree) {
		swap(tail->truesize, head_skb->truesize);
		swap(tail->destructor, head_skb->destructor);
		swap(tail->sk, head_skb->sk);
	}
	return segs;

err:
	kfree_skb_list(segs);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);
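
/* Illustrative sketch (editor's addition): software-segmenting a GSO skb
 * and walking the resulting singly-linked list, roughly what callers of
 * skb_gso_segment() do before transmitting each piece. Real callers must
 * present the skb with headers set up as skb_segment() expects; this
 * hypothetical helper only shows the list-handling protocol.
 */
static int __maybe_unused example_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	struct sk_buff *segs, *nskb;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while (segs) {
		nskb = segs->next;
		segs->next = NULL;
		kfree_skb(segs);	/* a real caller would transmit here */
		segs = nskb;
	}
	return 0;
}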
int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	struct sk_buff *lp, *p = *head;
	unsigned int delta_truesize;

	if (unlikely(p->len + len >= 65536))
		return -E2BIG;

	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		frag->page_offset += offset;
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		delta_truesize = skb->truesize -
				 SKB_TRUESIZE(skb_end_offset(skb));

		skb->truesize -= skb->data_len;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		frag->page.p	  = page;
		frag->page_offset = first_offset;
		skb_frag_size_set(frag, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We dont need to clear skbinfo->nr_frags here */

		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skbinfo->frags[0].page_offset += eat;
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive);
void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      offsetof(struct sk_buff, cb),
					      sizeof_field(struct sk_buff, cb),
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						sizeof(struct sk_buff_fclones),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
	       unsigned int recursion_level)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (unlikely(recursion_level >= 24))
		return -EMSGSIZE;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg_set_buf(sg, skb->data + offset, copy);
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
				return -EMSGSIZE;

			if (copy > len)
				copy = len;
			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
					frag->page_offset+offset-start);
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end, ret;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
				return -EMSGSIZE;

			if (copy > len)
				copy = len;
			ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
					      copy, recursion_level + 1);
			if (unlikely(ret < 0))
				return ret;
			elt += ret;
			if ((len -= copy) == 0)
				return elt;
			offset += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return elt;
}
/**
 *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 *	@skb: Socket buffer containing the buffers to be mapped
 *	@sg: The scatter-gather list to map into
 *	@offset: The offset into the buffer's contents to start mapping
 *	@len: Length of buffer space to be mapped
 *
 *	Fill the specified scatter-gather list with mappings/pointers into a
 *	region of the buffer space attached to a socket buffer. Returns either
 *	the number of scatterlist items used, or -EMSGSIZE if the contents
 *	could not fit.
 */
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);

	if (nsg <= 0)
		return nsg;

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
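
/* Illustrative sketch (editor's addition): mapping an skb into a fixed-size
 * scatterlist, as IPsec/crypto callers do. MAX_SKB_FRAGS + 1 entries is a
 * common upper bound for an skb without a frag list; an skb with a frag
 * list can need more (see skb_cow_data() below for the exact count).
 */
static int __maybe_unused example_map_to_sg(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	int nsg;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (nsg < 0)
		return nsg;	/* -EMSGSIZE: the skb did not fit */

	/* sg[0..nsg-1] now describe the skb's bytes; the last is marked. */
	return nsg;
}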
/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps skb to given
 * sglist without marking the sg which contains last skb data as the end.
 * So the caller can manipulate the sg list as it will when padding new data
 * after the first call without calling sg_unmark_end to expand the sg list.
 *
 * Scenario to use skb_to_sgvec_nomark:
 * 1. sg_init_table
 * 2. skb_to_sgvec_nomark(payload1)
 * 3. skb_to_sgvec_nomark(payload2)
 *
 * This is equivalent to:
 * 1. sg_init_table
 * 2. skb_to_sgvec(payload1)
 * 3. sg_unmark_end
 * 4. skb_to_sgvec(payload2)
 *
 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
 * is more preferable.
 */
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
			int offset, int len)
{
	return __skb_to_sgvec(skb, sg, offset, len, 0);
}
EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
/**
 *	skb_cow_data - Check that a socket buffer's data buffers are writable
 *	@skb: The socket buffer to check.
 *	@tailbits: Amount of trailing space to be added
 *	@trailer: Returned pointer to the skb where the @tailbits space begins
 *
 *	Make sure that the data buffers attached to a socket buffer are
 *	writable. If they are not, private copies are made of the data buffers
 *	and the socket buffer is set to use these instead.
 *
 *	If @tailbits is given, make sure that there is space to write @tailbits
 *	bytes of data beyond current end of socket buffer.  @trailer will be
 *	set to point to the skb in which this space begins.
 *
 *	The number of scatterlist elements required to completely map the
 *	COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_has_frag_list(skb)) {
		/* A little of trouble, not enough of space for trailer.
		 * This should not happen, when stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even more
		 * space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in troubles, going to mincer fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_has_frag_list(skb1) ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_has_frag_list(skb1)) {
			struct sk_buff *skb2;

			/* Fuck, we are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
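
/* Illustrative sketch (editor's addition): the usual skb_cow_data() call
 * pattern from the ESP code: make the whole buffer writable, reserve room
 * for a trailer, then append the trailer bytes via the returned skb with
 * pskb_put(). The helper and 'tlen' parameter are hypothetical.
 */
static int __maybe_unused example_add_trailer(struct sk_buff *skb, int tlen)
{
	struct sk_buff *trailer;
	int nsg;

	nsg = skb_cow_data(skb, tlen, &trailer);
	if (nsg < 0)
		return nsg;

	/* 'nsg' scatterlist entries would map the whole skb; the reserved
	 * tailroom lives at the end of 'trailer'.
	 */
	pskb_put(skb, trailer, tlen);
	return 0;
}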
static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

static void skb_set_err_queue(struct sk_buff *skb)
{
	/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
	 * So, it is safe to (mis)use it to mark skbs on the error queue.
	 */
	skb->pkt_type = PACKET_OUTGOING;
	BUILD_BUG_ON(PACKET_OUTGOING == 0);
}

/*
 * Note: We dont mem charge error packets (no sk_forward_alloc changes)
 */
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)sk->sk_rcvbuf)
		return -ENOMEM;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rmem_free;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	skb_set_err_queue(skb);

	/* before exiting rcu section, make sure dst is refcounted */
	skb_dst_force(skb);

	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_error_report(sk);
	return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
static bool is_icmp_err_skb(const struct sk_buff *skb)
{
	return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
		       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
}

struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
{
	struct sk_buff_head *q = &sk->sk_error_queue;
	struct sk_buff *skb, *skb_next = NULL;
	bool icmp_next = false;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	skb = __skb_dequeue(q);
	if (skb && (skb_next = skb_peek(q))) {
		icmp_next = is_icmp_err_skb(skb_next);
		if (icmp_next)
			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	if (is_icmp_err_skb(skb) && !icmp_next)
		sk->sk_err = 0;

	if (skb_next)
		sk->sk_error_report(sk);

	return skb;
}
EXPORT_SYMBOL(sock_dequeue_err_skb);
/**
 * skb_clone_sk - create clone of skb, and take reference to socket
 * @skb: the skb to clone
 *
 * This function creates a clone of a buffer that holds a reference on
 * sk_refcnt.  Buffers created via this function are meant to be
 * returned using sock_queue_err_skb, or freed via kfree_skb.
 *
 * When passing buffers allocated with this function to sock_queue_err_skb
 * it is necessary to wrap the call with sock_hold/sock_put in order to
 * prevent the socket from being released prior to being enqueued on
 * the sk_error_queue.
 */
struct sk_buff *skb_clone_sk(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct sk_buff *clone;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone) {
		sock_put(sk);
		return NULL;
	}

	clone->sk = sk;
	clone->destructor = sock_efree;

	return clone;
}
EXPORT_SYMBOL(skb_clone_sk);
static void __skb_complete_tx_timestamp(struct sk_buff *skb,
					struct sock *sk,
					int tstype,
					bool opt_stats)
{
	struct sock_exterr_skb *serr;
	int err;

	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
	serr->ee.ee_info = tstype;
	serr->opt_stats = opt_stats;
	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
		serr->ee.ee_data = skb_shinfo(skb)->tskey;
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			serr->ee.ee_data -= sk->sk_tskey;
	}

	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}
static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
{
	bool ret;

	if (likely(sysctl_tstamp_allow_data || tsonly))
		return true;

	read_lock_bh(&sk->sk_callback_lock);
	ret = sk->sk_socket && sk->sk_socket->file &&
	      file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
	read_unlock_bh(&sk->sk_callback_lock);
	return ret;
}
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = skb->sk;

	if (!skb_may_tx_timestamp(sk, false))
		goto err;

	/* Take a reference to prevent skb_orphan() from freeing the socket,
	 * but only if the socket refcount is not zero.
	 */
	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
		*skb_hwtstamps(skb) = *hwtstamps;
		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
		sock_put(sk);
		return;
	}

err:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype)
{
	struct sk_buff *skb;
	bool tsonly, opt_stats = false;

	if (!sk)
		return;

	if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
	    skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
		return;

	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
	if (!skb_may_tx_timestamp(sk, tsonly))
		return;

	if (tsonly) {
#ifdef CONFIG_INET
		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
		    sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM) {
			skb = tcp_get_timestamping_opt_stats(sk);
			opt_stats = true;
		} else
#endif
			skb = alloc_skb(0, GFP_ATOMIC);
	} else {
		skb = skb_clone(orig_skb, GFP_ATOMIC);
	}
	if (!skb)
		return;

	if (tsonly) {
		skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
					     SKBTX_ANY_TSTAMP;
		skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
	}

	if (hwtstamps)
		*skb_hwtstamps(skb) = *hwtstamps;
	else
		skb->tstamp = ktime_get_real();

	__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
}
EXPORT_SYMBOL_GPL(__skb_tstamp_tx);

void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
{
	return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
			       SCM_TSTAMP_SND);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
	struct sock *sk = skb->sk;
	struct sock_exterr_skb *serr;
	int err = 1;

	skb->wifi_acked_valid = 1;
	skb->wifi_acked = acked;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;

	/* Take a reference to prevent skb_orphan() from freeing the socket,
	 * but only if the socket refcount is not zero.
	 */
	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
		err = sock_queue_err_skb(sk, skb);
		sock_put(sk);
	}
	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
				     start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	skb_set_transport_header(skb, start);
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);
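
/* Illustrative sketch (editor's addition): how a virtio-net style driver
 * validates checksum metadata supplied by an untrusted source. csum_start
 * and csum_off here would come from the device/guest header, so they must
 * be range-checked by skb_partial_csum_set() before being trusted.
 */
static int __maybe_unused example_set_partial_csum(struct sk_buff *skb,
						   u16 csum_start, u16 csum_off)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_off))
		return -EINVAL;	/* caller should drop the packet */

	/* skb->ip_summed is now CHECKSUM_PARTIAL with verified offsets. */
	return 0;
}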
static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
			       unsigned int max)
{
	if (skb_headlen(skb) >= len)
		return 0;

	/* If we need to pullup then pullup to the max, so we
	 * won't need to do it again.
	 */
	if (max > skb->len)
		max = skb->len;

	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	if (skb_headlen(skb) < len)
		return -EPROTO;

	return 0;
}
#define MAX_TCP_HDR_LEN (15 * 4)

static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
				      typeof(IPPROTO_IP) proto,
				      unsigned int off)
{
	switch (proto) {
		int err;

	case IPPROTO_TCP:
		err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
					  off + MAX_TCP_HDR_LEN);
		if (!err && !skb_partial_csum_set(skb, off,
						  offsetof(struct tcphdr,
							   check)))
			err = -EPROTO;
		return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;

	case IPPROTO_UDP:
		err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
					  off + sizeof(struct udphdr));
		if (!err && !skb_partial_csum_set(skb, off,
						  offsetof(struct udphdr,
							   check)))
			err = -EPROTO;
		return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
	}

	return ERR_PTR(-EPROTO);
}
/* This value should be large enough to cover a tagged ethernet header plus
 * maximally sized IP and TCP or UDP headers.
 */
#define MAX_IP_HDR_LEN 128

static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
{
	unsigned int off;
	bool fragment;
	__sum16 *csum;
	int err = -EPROTO;

	fragment = false;

	err = skb_maybe_pull_tail(skb,
				  sizeof(struct iphdr),
				  MAX_IP_HDR_LEN);
	if (err < 0)
		goto out;

	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
		fragment = true;

	off = ip_hdrlen(skb);

	err = -EPROTO;

	if (fragment)
		goto out;

	csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
	if (IS_ERR(csum))
		return PTR_ERR(csum);

	if (recalculate)
		*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   skb->len - off,
					   ip_hdr(skb)->protocol, 0);
	err = 0;

out:
	return err;
}
/* This value should be large enough to cover a tagged ethernet header plus
 * an IPv6 header, all options, and a maximal TCP or UDP header.
 */
#define MAX_IPV6_HDR_LEN 256

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
{
	int err;
	u8 nexthdr;
	unsigned int off;
	unsigned int len;
	bool fragment;
	bool done;
	__sum16 *csum;

	fragment = false;
	done = false;

	off = sizeof(struct ipv6hdr);

	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
	if (err < 0)
		goto out;

	nexthdr = ipv6_hdr(skb)->nexthdr;

	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
	while (off <= len && !done) {
		switch (nexthdr) {
		case IPPROTO_DSTOPTS:
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING: {
			struct ipv6_opt_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ipv6_opt_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_optlen(hp);
			break;
		}
		case IPPROTO_AH: {
			struct ip_auth_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ip_auth_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_authlen(hp);
			break;
		}
		case IPPROTO_FRAGMENT: {
			struct frag_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct frag_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct frag_hdr, skb, off);

			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
				fragment = true;

			nexthdr = hp->nexthdr;
			off += sizeof(struct frag_hdr);
			break;
		}
		default:
			done = true;
			break;
		}
	}

	err = -EPROTO;

	if (!done || fragment)
		goto out;

	csum = skb_checksum_setup_ip(skb, nexthdr, off);
	if (IS_ERR(csum))
		return PTR_ERR(csum);

	if (recalculate)
		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 skb->len - off, nexthdr, 0);
	err = 0;

out:
	return err;
}
/**
 * skb_checksum_setup - set up partial checksum offset
 * @skb: the skb to set up
 * @recalculate: if true the pseudo-header checksum will be recalculated
 */
int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
{
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		err = skb_checksum_setup_ipv4(skb, recalculate);
		break;

	case htons(ETH_P_IPV6):
		err = skb_checksum_setup_ipv6(skb, recalculate);
		break;

	default:
		err = -EPROTO;
		break;
	}

	return err;
}
EXPORT_SYMBOL(skb_checksum_setup);
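
/* Illustrative sketch (editor's addition): a backend driver (in the style
 * of a xen-netback receiver) repairing checksum state on a guest-supplied
 * packet before injecting it into the stack. The helper is hypothetical.
 */
static int __maybe_unused example_fix_guest_csum(struct sk_buff *skb)
{
	int err;

	/* 'true' also recalculates the pseudo-header checksum. */
	err = skb_checksum_setup(skb, true);
	if (err)
		kfree_skb(skb);

	return err;
}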
/**
 * skb_checksum_maybe_trim - maybe trims the given skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 *
 * Checks whether the given skb has data beyond the given transport length.
 * If so, returns a cloned skb trimmed to this transport length.
 * Otherwise returns the provided skb. Returns NULL in error cases
 * (e.g. transport_len exceeds skb length or out-of-memory).
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
					       unsigned int transport_len)
{
	struct sk_buff *skb_chk;
	unsigned int len = skb_transport_offset(skb) + transport_len;
	int ret;

	if (skb->len < len)
		return NULL;
	else if (skb->len == len)
		return skb;

	skb_chk = skb_clone(skb, GFP_ATOMIC);
	if (!skb_chk)
		return NULL;

	ret = pskb_trim_rcsum(skb_chk, len);
	if (ret) {
		kfree_skb(skb_chk);
		return NULL;
	}

	return skb_chk;
}
/**
 * skb_checksum_trimmed - validate checksum of an skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 * @skb_chkf: checksum function to use
 *
 * Applies the given checksum function skb_chkf to the provided skb.
 * Returns a checked and maybe trimmed skb. Returns NULL on error.
 *
 * If the skb has data beyond the given transport length, then a
 * trimmed & cloned skb is checked and returned.
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb))
{
	struct sk_buff *skb_chk;
	unsigned int offset = skb_transport_offset(skb);
	__sum16 ret;

	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
	if (!skb_chk)
		goto err;

	if (!pskb_may_pull(skb_chk, offset))
		goto err;

	skb_pull_rcsum(skb_chk, offset);
	ret = skb_chkf(skb_chk);
	skb_push_rcsum(skb_chk, offset);

	if (ret)
		goto err;

	return skb_chk;

err:
	if (skb_chk && skb_chk != skb)
		kfree_skb(skb_chk);

	return NULL;
}
EXPORT_SYMBOL(skb_checksum_trimmed);
void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
			     skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(skbuff_head_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);
/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: pointer to boolean
 * @delta_truesize: how much more was allocated than was requested
 */
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	struct skb_shared_info *to_shinfo, *from_shinfo;
	int i, delta, len = from->len;

	*fragstolen = false;

	if (skb_cloned(to))
		return false;

	if (len <= skb_tailroom(to)) {
		if (len)
			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	to_shinfo = skb_shinfo(to);
	from_shinfo = skb_shinfo(from);
	if (to_shinfo->frag_list || from_shinfo->frag_list)
		return false;
	if (skb_zcopy(to) || skb_zcopy(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (to_shinfo->nr_frags +
		    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, to_shinfo->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (to_shinfo->nr_frags +
		    from_shinfo->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(to_shinfo->frags + to_shinfo->nr_frags,
	       from_shinfo->frags,
	       from_shinfo->nr_frags * sizeof(skb_frag_t));
	to_shinfo->nr_frags += from_shinfo->nr_frags;

	if (!skb_cloned(from))
		from_shinfo->nr_frags = 0;

	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */
	for (i = 0; i < from_shinfo->nr_frags; i++)
		__skb_frag_ref(&from_shinfo->frags[i]);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);
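
/* Illustrative sketch (editor's addition): the caller-side protocol for
 * skb_try_coalesce(), as used when collapsing receive queues: on success,
 * free 'from' with kfree_skb_partial() (so a stolen head is not freed
 * twice) and account the truesize delta. The helper is hypothetical.
 */
static bool __maybe_unused example_coalesce(struct sk_buff *to,
					    struct sk_buff *from)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(to, from, &fragstolen, &delta))
		return false;

	kfree_skb_partial(from, fragstolen);
	/* a real caller would charge 'delta' to its receive-memory budget */
	return true;
}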

/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean a skb before injecting it in
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);

	if (!xnet)
		return;

	ipvs_reset(skb);
	skb_orphan(skb);
	skb->mark = 0;
	skb->tstamp = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);

/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	} else if (unlikely(skb_is_gso_sctp(skb))) {
		thlen = sizeof(struct sctphdr);
	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
		thlen = sizeof(struct udphdr);
	}
	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_mac_seglen is used to determine the real size of the
 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
 * headers (TCP/UDP).
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
 *
 * There are a couple of instances where we have a GSO skb, and we
 * want to determine what size it would be after it is segmented.
 *
 * We might want to check:
 * -    L3+L4+payload size (e.g. IP forwarding)
 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
 *
 * This is a helper to do that correctly considering GSO_BY_FRAGS.
 *
 * @skb: GSO skb
 *
 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
 *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
 *
 * @max_len: The maximum permissible length.
 *
 * Returns true if the segmented length <= max length.
 */
static inline bool skb_gso_size_check(const struct sk_buff *skb,
				      unsigned int seg_len,
				      unsigned int max_len) {
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const struct sk_buff *iter;

	if (shinfo->gso_size != GSO_BY_FRAGS)
		return seg_len <= max_len;

	/* Undo this so we can re-use header sizes */
	seg_len -= GSO_BY_FRAGS;

	skb_walk_frags(skb, iter) {
		if (seg_len + skb_headlen(iter) > max_len)
			return false;
	}

	return true;
}

/**
 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
 *
 * @skb: GSO skb
 * @mtu: MTU to validate against
 *
 * skb_gso_validate_network_len validates if a given skb will fit a
 * wanted MTU once split. It considers L3 headers, L4 headers, and the
 * payload.
 */
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
{
	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);

/**
 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
 *
 * @skb: GSO skb
 * @len: length to validate against
 *
 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
 * length once split, including L2, L3 and L4 headers and the payload.
 */
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
{
	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);

static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
	int mac_len;

	if (skb_cow(skb, skb_headroom(skb)) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	mac_len = skb->data - skb_mac_header(skb);
	if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
			mac_len - VLAN_HLEN - ETH_TLEN);
	}
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = skb_reorder_vlan_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);

int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable);

/* remove VLAN header from packet and update csum accordingly.
 * expects a non skb_vlan_tag_present skb with a vlan tag payload
 */
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	int offset = skb->data - skb_mac_header(skb);
	int err;

	if (WARN_ONCE(offset,
		      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
		      offset))
		return -EINVAL;

	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);

	return err;
}
EXPORT_SYMBOL(__skb_vlan_pop);

/* Pop a vlan tag either from hwaccel or from payload.
 * Expects skb->data at mac header.
 */
int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely(!eth_type_vlan(skb->protocol)))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(!eth_type_vlan(skb->protocol)))
		return 0;

	vlan_proto = skb->protocol;
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);

/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
 * Expects skb->data at mac header.
 */
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		int offset = skb->data - skb_mac_header(skb);
		int err;

		if (WARN_ONCE(offset,
			      "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
			      offset))
			return -EINVAL;

		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err)
			return err;

		skb->protocol = skb->vlan_proto;
		skb->mac_len += VLAN_HLEN;

		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_push);

/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	gfp_t gfp_head;
	int i;

	*errcode = -EMSGSIZE;
	/* Note this test could be relaxed, if we succeed to allocate
	 * high order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	gfp_head = gfp_mask;
	if (gfp_head & __GFP_DIRECT_RECLAIM)
		gfp_head |= __GFP_RETRY_MAYFAIL;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_head);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
						   __GFP_COMP |
						   __GFP_NOWARN |
						   __GFP_NORETRY,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);

/* carve out the first off bytes from skb when off < headlen */
static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
				    const int headlen, gfp_t gfp_mask)
{
	int i;
	int size = skb_end_offset(skb);
	int new_hlen = headlen - off;
	u8 *data;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy real data, and all frags */
	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
	skb->len -= off;

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info,
			frags[skb_shinfo(skb)->nr_frags]));
	if (skb_cloned(skb)) {
		/* drop the old head gracefully */
		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree(data);
			return -ENOMEM;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);
		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);
		skb_release_data(skb);
	} else {
		/* we can reuse the existing refcount - all we did was
		 * relocate values
		 */
		skb_free_head(skb);
	}

	skb->head = data;
	skb->data = data;
	skb->head_frag = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
#else
	skb->end = skb->head + size;
#endif
	skb_set_tail_pointer(skb, skb_headlen(skb));
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	return 0;
}

static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);

/* carve out the first eat bytes from skb's frag_list. May recurse into
 * pskb_carve()
 */
static int pskb_carve_frag_list(struct sk_buff *skb,
				struct skb_shared_info *shinfo, int eat,
				gfp_t gfp_mask)
{
	struct sk_buff *list = shinfo->frag_list;
	struct sk_buff *clone = NULL;
	struct sk_buff *insp = NULL;

	do {
		if (!list) {
			pr_err("Not enough bytes to eat. Want %d\n", eat);
			return -EFAULT;
		}
		if (list->len <= eat) {
			/* Eaten as whole. */
			eat -= list->len;
			list = list->next;
			insp = list;
		} else {
			/* Eaten partially. */
			if (skb_shared(list)) {
				clone = skb_clone(list, gfp_mask);
				if (!clone)
					return -ENOMEM;
				insp = list->next;
				list = clone;
			} else {
				/* This may be pulled without problems. */
				insp = list;
			}
			if (pskb_carve(list, eat, gfp_mask) < 0) {
				kfree_skb(clone);
				return -ENOMEM;
			}
			break;
		}
	} while (eat);

	/* Free pulled out fragments. */
	while ((list = shinfo->frag_list) != insp) {
		shinfo->frag_list = list->next;
		kfree_skb(list);
	}
	/* And insert new clone at head. */
	if (clone) {
		clone->next = list;
		shinfo->frag_list = clone;
	}
	return 0;
}

/* carve off first len bytes from skb. Split line (off) is in the
 * non-linear part of skb
 */
static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
				       int pos, gfp_t gfp_mask)
{
	int i, k = 0;
	int size = skb_end_offset(skb);
	u8 *data;
	const int nfrags = skb_shinfo(skb)->nr_frags;
	struct skb_shared_info *shinfo;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb), offsetof(struct skb_shared_info,
					 frags[skb_shinfo(skb)->nr_frags]));
	if (skb_orphan_frags(skb, gfp_mask)) {
		kfree(data);
		return -ENOMEM;
	}
	shinfo = (struct skb_shared_info *)(data + size);
	for (i = 0; i < nfrags; i++) {
		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + fsize > off) {
			shinfo->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < off) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately. This is what we do.
				 */
				shinfo->frags[0].page_offset += off - pos;
				skb_frag_size_sub(&shinfo->frags[0], off - pos);
			}
			skb_frag_ref(skb, i);
			k++;
		}
		pos += fsize;
	}
	shinfo->nr_frags = k;
	if (skb_has_frag_list(skb))
		skb_clone_fraglist(skb);

	if (k == 0) {
		/* split line is in frag list */
		pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
	}
	skb_release_data(skb);

	skb->head = data;
	skb->head_frag = 0;
	skb->data = data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
#else
	skb->end = skb->head + size;
#endif
	skb_reset_tail_pointer(skb);
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	skb->len -= off;
	skb->data_len = skb->len;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;
}

/* remove len bytes from the beginning of the skb */
static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
{
	int headlen = skb_headlen(skb);

	if (len < headlen)
		return pskb_carve_inside_header(skb, len, headlen, gfp);
	else
		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
}

/* Extract to_copy bytes starting at off from skb, and return this in
 * a new skb
 */
struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
			     int to_copy, gfp_t gfp)
{
	struct sk_buff *clone = skb_clone(skb, gfp);

	if (!clone)
		return NULL;

	if (pskb_carve(clone, off, gfp) < 0 ||
	    pskb_trim(clone, to_copy)) {
		kfree_skb(clone);
		return NULL;
	}
	return clone;
}
EXPORT_SYMBOL(pskb_extract);

/**
 * skb_condense - try to get rid of fragments/frag_list if possible
 * @skb: buffer
 *
 * Can be used to save memory before skb is added to a busy queue.
 * If packet has bytes in frags and enough tail room in skb->head,
 * pull all of them, so that we can free the frags right now and adjust
 * truesize.
 * Notes:
 *	We do not reallocate skb->head and thus cannot fail.
 *	Caller must re-evaluate skb->truesize if needed.
 */
void skb_condense(struct sk_buff *skb)
{
	if (skb->data_len) {
		if (skb->data_len > skb->end - skb->tail ||
		    skb_cloned(skb))
			return;

		/* Nice, we can free page frag(s) right now */
		__pskb_pull_tail(skb, skb->data_len);
	}
	/* At this point, skb->truesize might be overestimated,
	 * because skb had a fragment, and fragments do not tell
	 * their truesize.
	 * When we pulled its content into skb->head, fragment
	 * was freed, but __pskb_pull_tail() could not possibly
	 * adjust skb->truesize, not knowing the frag truesize.
	 */
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}
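
/* A minimal usage sketch (illustrative only, not part of this file):
 * condensing before parking an skb on a long-lived queue, then
 * charging the (possibly reduced) truesize to the socket.
 * example_enqueue() is a hypothetical name.
 */
static void __maybe_unused example_enqueue(struct sock *sk,
					   struct sk_buff_head *q,
					   struct sk_buff *skb)
{
	skb_condense(skb);
	skb_set_owner_r(skb, sk);	/* accounts skb->truesize after condense */
	__skb_queue_tail(q, skb);
}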