/*
 *      IPV4 GSO/GRO offload support
 *      Linux INET implementation
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>
static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;

#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))

/* Per-port offload handlers are kept on a singly linked list rooted at
 * udp_offload_base.  Readers walk it under rcu_read_lock(); writers
 * serialize additions and removals with udp_offload_lock.
 */
struct udp_offload_priv {
        struct udp_offload      *offload;
        struct rcu_head         rcu;
        struct udp_offload_priv __rcu *next;
};
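/* For plain (non-encapsulated) UDP, seed uh->check with the pseudo-header
 * sum and record csum_start/csum_offset so each segment's checksum can be
 * completed later (CHECKSUM_PARTIAL).
 */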
static int udp4_ufo_send_check(struct sk_buff *skb)
{
        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                return -EINVAL;

        if (likely(!skb->encapsulation)) {
                const struct iphdr *iph;
                struct udphdr *uh;

                iph = ip_hdr(skb);
                uh = udp_hdr(skb);

                uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
                                               IPPROTO_UDP, 0);
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
                skb->ip_summed = CHECKSUM_PARTIAL;
        }

        return 0;
}
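/* GSO/UFO segmentation entry point for UDP over IPv4: tunnel GSO skbs are
 * handed to skb_udp_tunnel_segment(), everything else gets a software UDP
 * checksum and is split by skb_segment(), with the IP headers of the
 * fragments fixed up later in inet_gso_segment().
 */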
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                                         netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
        int offset;
        __wsum csum;

        if (skb->encapsulation &&
            skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
                segs = skb_udp_tunnel_segment(skb, features);
                goto out;
        }

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;

                if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
                                      SKB_GSO_UDP_TUNNEL |
                                      SKB_GSO_IPIP |
                                      SKB_GSO_GRE | SKB_GSO_MPLS) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        /* Do software UFO. Complete and fill in the UDP checksum as
         * HW cannot do checksum of UDP packets sent as multiple
         * IP fragments.
         */
        offset = skb_checksum_start_offset(skb);
        csum = skb_checksum(skb, offset, skb->len - offset, 0);
        offset += skb->csum_offset;
        *(__sum16 *)(skb->data + offset) = csum_fold(csum);
        skb->ip_summed = CHECKSUM_NONE;

        /* Fragment the skb. IP headers of the fragments are updated in
         * inet_gso_segment()
         */
        segs = skb_segment(skb, features);
out:
        return segs;
}
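/* Register a per-port GRO handler.  The new entry is linked at the head of
 * the RCU list, so concurrent udp_gro_receive() walkers see either the old
 * or the new list, never a partially updated one.
 */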
int udp_add_offload(struct udp_offload *uo)
{
        struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);

        if (!new_offload)
                return -ENOMEM;

        new_offload->offload = uo;

        spin_lock(&udp_offload_lock);
        new_offload->next = udp_offload_base;
        rcu_assign_pointer(udp_offload_base, new_offload);
        spin_unlock(&udp_offload_lock);

        return 0;
}
EXPORT_SYMBOL(udp_add_offload);
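/* A UDP tunnel driver would typically register its well-known port at setup
 * time, roughly like the sketch below (hypothetical handler names, error
 * handling omitted):
 *
 *      static struct udp_offload foo_udp_offload = {
 *              .port = htons(4789),            // e.g. the VXLAN port
 *              .callbacks = {
 *                      .gro_receive  = foo_gro_receive,
 *                      .gro_complete = foo_gro_complete,
 *              },
 *      };
 *
 *      udp_add_offload(&foo_udp_offload);
 *
 * and drop it again with udp_del_offload(&foo_udp_offload) on teardown.
 */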
static void udp_offload_free_routine(struct rcu_head *head)
{
        struct udp_offload_priv *ou_priv = container_of(head, struct udp_offload_priv, rcu);

        kfree(ou_priv);
}
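/* Unregister a per-port GRO handler: unlink it under udp_offload_lock and
 * free it only after an RCU grace period, so lockless readers that still
 * hold a reference never touch freed memory.
 */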
void udp_del_offload(struct udp_offload *uo)
{
        struct udp_offload_priv __rcu **head = &udp_offload_base;
        struct udp_offload_priv *uo_priv;

        spin_lock(&udp_offload_lock);

        uo_priv = udp_deref_protected(*head);
        for (; uo_priv != NULL;
             uo_priv = udp_deref_protected(*head)) {
                if (uo_priv->offload == uo) {
                        rcu_assign_pointer(*head,
                                           udp_deref_protected(uo_priv->next));
                        goto unlock;
                }
                head = &uo_priv->next;
        }
        pr_warn("udp_del_offload: didn't find offload for port %d\n", ntohs(uo->port));
unlock:
        spin_unlock(&udp_offload_lock);
        if (uo_priv != NULL)
                call_rcu(&uo_priv->rcu, udp_offload_free_routine);
}
EXPORT_SYMBOL(udp_del_offload);
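/* GRO receive for UDP: aggregation is attempted only for packets whose
 * destination port has a registered offload.  Candidate flows are matched on
 * the source/destination port pair (compared as a single 32-bit word) before
 * the per-port gro_receive callback is invoked on the inner packet.
 */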
static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        struct udp_offload_priv *uo_priv;
        struct sk_buff *p, **pp = NULL;
        struct udphdr *uh, *uh2;
        unsigned int hlen, off;
        int flush = 1;

        if (NAPI_GRO_CB(skb)->udp_mark ||
            (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
                goto out;

        /* mark that this skb passed once through the udp gro layer */
        NAPI_GRO_CB(skb)->udp_mark = 1;

        off  = skb_gro_offset(skb);
        hlen = off + sizeof(*uh);
        uh   = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                uh = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!uh))
                        goto out;
        }

        rcu_read_lock();
        uo_priv = rcu_dereference(udp_offload_base);
        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
                if (uo_priv->offload->port == uh->dest &&
                    uo_priv->offload->callbacks.gro_receive)
                        goto unflush;
        }
        goto out_unlock;

unflush:
        flush = 0;

        for (p = *head; p; p = p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                uh2 = (struct udphdr *)(p->data + off);
                if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
        }

        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        pp = uo_priv->offload->callbacks.gro_receive(head, skb);

out_unlock:
        rcu_read_unlock();
out:
        NAPI_GRO_CB(skb)->flush |= flush;
        return pp;
}
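/* Called when a merged GRO packet is handed up the stack: rewrite the UDP
 * length to cover the aggregated payload, then let the matching per-port
 * gro_complete callback fix up the encapsulated headers.
 */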
static int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
        struct udp_offload_priv *uo_priv;
        __be16 newlen = htons(skb->len - nhoff);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
        int err = -ENOSYS;

        uh->len = newlen;

        rcu_read_lock();

        uo_priv = rcu_dereference(udp_offload_base);
        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
                if (uo_priv->offload->port == uh->dest &&
                    uo_priv->offload->callbacks.gro_complete)
                        break;
        }

        if (uo_priv != NULL)
                err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));

        rcu_read_unlock();
        return err;
}
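/* UDP offload hooks for IPv4, attached to IPPROTO_UDP through
 * inet_add_offload() at init time.
 */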
static const struct net_offload udpv4_offload = {
        .callbacks = {
                .gso_send_check = udp4_ufo_send_check,
                .gso_segment = udp4_ufo_fragment,
                .gro_receive = udp_gro_receive,
                .gro_complete = udp_gro_complete,
        },
};
int __init udpv4_offload_init(void)
{
        return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}