#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>
#include <scsi/fc/fc_fcoe.h>
/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
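
/* Illustrative sketch, not part of the original file: the BUILD_BUG_ON
 * above guarantees that src and dst are adjacent __be32 fields, so the
 * single 8-byte memcpy() is equivalent to the two assignments below and
 * lets the compiler emit one 64-bit load/store pair. The _slow name is
 * hypothetical.
 */
static __maybe_unused void iph_to_flow_copy_addrs_slow(struct flow_keys *flow,
						       const struct iphdr *iph)
{
	flow->src = iph->saddr;		/* first half of the 8-byte region */
	flow->dst = iph->daddr;		/* second half */
}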
/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);
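
/* Hypothetical usage sketch, not part of the original file: the return
 * value packs both 16-bit ports into one __be32, matching the
 * ports/port16[] union in struct flow_keys, so a caller can do e.g.:
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect(skb, &keys))
 *		keys.ports = __skb_flow_get_ports(skb, keys.thoff,
 *						  keys.ip_proto, NULL, 0);
 *
 * keys.port16[0] then holds the source port, keys.port16[1] the
 * destination port.
 */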
/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the struct flow_keys from either the skbuff
 * or a raw buffer specified by the rest parameters
 */
bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
			void *data, __be16 proto, int nhoff, int hlen)
{
	u8 ip_proto;

	if (!data) {
		data = skb->data;
		proto = skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
	}

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5)
			return false;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;
		if (ip_is_fragment(iph))
			ip_proto = 0;

		/* skip the address processing if skb is NULL. The assumption
		 * here is that if there is no skb we are not looking for flow
		 * info but lengths and protocols.
		 */
		if (!skb)
			break;

		iph_to_flow_copy_addrs(flow, iph);
		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
		__be32 flow_label;

ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		/* see comment above in IPv4 section */
		if (!skb)
			break;

		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);

		flow_label = ip6_flowlabel(iph);
		if (flow_label) {
			/* Awesome, IPv6 packet has a flow label so we can
			 * use that to represent the ports without any
			 * further dissection.
			 */
			flow->n_proto = proto;
			flow->ip_proto = ip_proto;
			flow->ports = flow_label;
			flow->thoff = (u16)nhoff;

			return true;
		}

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	case htons(ETH_P_TIPC): {
		struct {
			__be32 pre[3];
			__be32 srcnode;
		} *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		flow->src = hdr->srcnode;
		flow->dst = 0;
		flow->n_proto = proto;
		flow->thoff = (u16)nhoff;
		return true;
	}
	case htons(ETH_P_FCOE):
		flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
		/* fall through */
	default:
		return false;
	}
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = __skb_header_pointer(skb, nhoff,
							   sizeof(_eth),
							   data, hlen, &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	default:
		break;
	}
	flow->n_proto = proto;
	flow->ip_proto = ip_proto;
	flow->thoff = (u16) nhoff;

	/* unless skb is set we don't need to record port info */
	if (skb)
		flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
						   data, hlen);

	return true;
}
EXPORT_SYMBOL(__skb_flow_dissect);
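
/* Hypothetical usage sketch, not part of the original file: with @skb
 * NULL the dissector works on a raw buffer and, per the comments above,
 * only fills in lengths and protocols (no addresses or ports):
 *
 *	struct flow_keys keys;
 *
 *	if (__skb_flow_dissect(NULL, &keys, data, htons(ETH_P_IP), 0, len))
 *		pr_debug("ip_proto %u thoff %u\n", keys.ip_proto, keys.thoff);
 */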
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
{
	__flow_hash_secret_init();
	return jhash_3words(a, b, c, hashrnd);
}
static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
{
	u32 hash;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys->dst < (__force u32)keys->src) ||
	    (((__force u32)keys->dst == (__force u32)keys->src) &&
	     ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
		swap(keys->dst, keys->src);
		swap(keys->port16[0], keys->port16[1]);
	}

	hash = __flow_hash_3words((__force u32)keys->dst,
				  (__force u32)keys->src,
				  (__force u32)keys->ports);
	if (!hash)
		hash = 1;

	return hash;
}
u32 flow_hash_from_keys(struct flow_keys *keys)
{
	return __flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(flow_hash_from_keys);
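
/* Worked example, not part of the original file: for a flow
 * 10.0.0.2:40000 -> 10.0.0.1:80, the reply direction carries the same
 * tuple with src/dst and the two ports exchanged. The swap above
 * canonicalizes whichever direction it sees (dst < src, or ports
 * swapped on an address tie), so both directions feed the identical
 * (dst, src, ports) triple to __flow_hash_3words() and hash alike.
 */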
/*
 * __skb_get_hash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_hash = 1;

	skb->sw_hash = 1;

	skb->hash = __flow_hash_from_keys(&keys);
}
EXPORT_SYMBOL(__skb_get_hash);
/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);

		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
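
/* Illustrative note, not part of the original file: reciprocal_scale()
 * maps a 32-bit hash onto [0, qcount) without a division:
 *
 *	queue = (u16)(((u64)hash * qcount) >> 32) + qoffset;
 *
 * e.g. hash = 0x80000000 with qcount = 8 selects queue 4 + qoffset.
 */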
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen)
{
	u32 poff = keys->thoff;

	switch (keys->ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
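
/* Worked example, not part of the original file: byte 12 of the TCP
 * header carries the data offset in its top nibble, counted in 32-bit
 * words. For a header byte of 0x50:
 *
 *	(0x50 & 0xF0) >> 2 == 0x50 >> 2 == 20
 *
 * i.e. the minimal 20-byte TCP header, which is exactly what
 * (*doff & 0xF0) >> 2 computes above without an unaligned 16-bit load.
 */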
/* skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, which can thus
 * dynamically truncate packets and analyze only headers, without having
 * to push actual payload to user space.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}
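
/* Hypothetical usage sketch, not part of the original file: a BPF-style
 * capture path can clamp the copied bytes to the header area only
 * (snaplen is an assumed variable here):
 *
 *	u32 poff = skb_get_poff(skb);
 *	u32 copy = min_t(u32, poff ?: skb->len, snaplen);
 */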
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[skb->sender_cpu - 1]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	if (skb->sender_cpu == 0)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
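
/* Summary note, not part of the original file: transmit queue selection
 * thus tries, in order, the driver's ndo_select_queue() hook, then
 * __netdev_pick_tx() (the socket's cached queue, an XPS map for the
 * sending CPU, and finally the flow-hash based skb_tx_hash() fallback),
 * with the result capped to the device's real queue count.
 */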