#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>
/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
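
/*
 * skb_flow_dissect - extract the flow keys of a packet
 *
 * Fills @flow with the source/destination addresses, the L4 protocol,
 * the transport header offset and, when present, the port pair.
 * Headers are read via skb_header_pointer(), so non-linear skbs are
 * handled as well.  Returns false if the packet cannot be dissected.
 */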
bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int poff, nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
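	/*
	 * L2/L3 demux: IPv4/IPv6 record the flow addresses (IPv6 ones are
	 * folded to 32 bits with ipv6_addr_hash(), see below) and set
	 * ip_proto for the transport handling further down; VLAN and
	 * PPPoE strip their headers, adjust proto/nhoff and re-enter the
	 * parser.
	 */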
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		if (ip_is_fragment(iph))
			ip_proto = 0;
		else
			ip_proto = iph->protocol;
		iph_to_flow_copy_addrs(flow, iph);
		nhoff += iph->ihl * 4;
		break;
	}
	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
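	/*
	 * 802.1Q: skip the VLAN tag and restart parsing at the
	 * encapsulated ethertype.
	 */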
	case __constant_htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case __constant_htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case __constant_htons(PPP_IP):
			goto ip;
		case __constant_htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}
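
	/*
	 * The network header is parsed at this point.  Before reading
	 * ports, peek into GRE so that flows inside version 0 GRE tunnels
	 * (including Transparent Ethernet Bridging) are dissected on
	 * their inner headers.
	 */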
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = skb_header_pointer(skb, nhoff,
							 sizeof(_eth), &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	default:
		break;
	}
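
	/*
	 * Record the transport protocol and, for protocols that carry
	 * ports at a known offset, the 32-bit source/destination port
	 * pair used by the flow hash.
	 */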
	flow->ip_proto = ip_proto;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		__be32 *ports, _ports;

		nhoff += poff;
		ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
		if (ports)
			flow->ports = *ports;
	}

	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
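
/*
 * Typical use of skb_flow_dissect() (illustrative sketch only, not part
 * of the original file): dissect once, then hash or classify on the keys.
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect(skb, &keys))
 *		pr_debug("ip_proto %u ports %#x thoff %u\n",
 *			 keys.ip_proto, ntohl(keys.ports), keys.thoff);
 */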
static u32 hashrnd __read_mostly;

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;
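
	/*
	 * If ports were extracted (keys.ports != 0), mark the hash as an
	 * L4 hash; then order the tuple so that both directions of a flow
	 * feed identical input to jhash and thus get the same rxhash.
	 */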
	if (keys.ports)
		skb->l4_rxhash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = jhash_1word(hash, hashrnd);
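
	/*
	 * Scale the 32-bit hash into [0, qcount) without a modulo:
	 * (hash * qcount) >> 32.  E.g. with qcount == 8, a hash of
	 * 0x40000000 (a quarter of the 32-bit range) maps to queue 2.
	 */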
	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to the user
 * space and can analyze headers only, instead.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 poff = 0;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	poff = keys.thoff;
	switch (keys.ip_proto) {
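	/*
	 * TCP headers are variable length, so the header is re-read to
	 * pick up doff; the remaining cases just add a fixed header size.
	 */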
	case IPPROTO_TCP: {
		const struct tcphdr *tcph;
		struct tcphdr _tcph;

		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
		if (!tcph)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
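
/*
 * Clamp a queue index chosen by a driver's ndo_select_queue() to the
 * device's real Tx queue range, warning (ratelimited) and falling back
 * to queue 0 when it is out of range.
 */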
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}
	return queue_index;
}
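
/*
 * XPS (Transmit Packet Steering): pick a Tx queue from the set that was
 * configured for the current CPU, preferring the socket hash when one
 * is available.  Returns -1 when XPS is not configured or not built in.
 */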
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;

				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk) {
			struct dst_entry *dst =
			    rcu_dereference_check(sk->sk_dst_cache, 1);

			if (dst && skb_dst(skb) == dst)
				sk_tx_queue_set(sk, new_index);
		}

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(__netdev_pick_tx);
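
/*
 * __netdev_pick_tx() is also useful as a fallback for drivers that
 * implement ndo_select_queue() but only special-case some traffic,
 * e.g. (illustrative sketch, foo_select_queue is hypothetical):
 *
 *	static u16 foo_select_queue(struct net_device *dev,
 *				    struct sk_buff *skb)
 *	{
 *		return __netdev_pick_tx(dev, skb);
 *	}
 */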
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb);
		else
			queue_index = __netdev_pick_tx(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
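
/*
 * hashrnd seeds both the flow (rxhash) and Tx queue hashes above.  It is
 * filled with random bytes once at boot, so the resulting hash values
 * differ from boot to boot.
 */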
static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);