#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
static bool dissector_uses_key(const struct flow_dissector *flow_dissector,
			       enum flow_dissector_key_id key_id)
{
	return flow_dissector->used_keys & (1 << key_id);
}
static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}
static void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
				       enum flow_dissector_key_id key_id,
				       void *target_container)
{
	return ((char *)target_container) + flow_dissector->offset[key_id];
}
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
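
/* Illustrative sketch (not part of the original file, kept under "#if 0" so
 * it is never built): a typical caller lists the keys it cares about together
 * with the offsets of the matching fields in its own container struct and
 * hands that table to skb_flow_dissector_init().  The names example_keys,
 * example_dissector and example_dissector_setup are hypothetical; compare
 * with init_default_flow_dissectors() at the bottom of this file.
 */
#if 0
static const struct flow_dissector_key example_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};

static struct flow_dissector example_dissector __read_mostly;

static void example_dissector_setup(void)
{
	/* CONTROL and BASIC must be in the table, or the BUG_ON()s above fire */
	skb_flow_dissector_init(&example_dissector, example_keys,
				ARRAY_SIZE(example_keys));
}
#endif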
/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);
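
/* Illustrative sketch (not part of the original file, kept under "#if 0"):
 * pulling the 32-bit src/dst port word out of an skb once the transport
 * header offset and the L4 protocol are known.  Passing data == NULL makes
 * the helper fall back to skb->data and skb_headlen(skb).  The wrapper name
 * is hypothetical.
 */
#if 0
static __be32 example_get_ports(const struct sk_buff *skb, int thoff,
				u8 ip_proto)
{
	/* Returns both 16-bit ports packed as they appear on the wire
	 * (source port first), or 0 if the header could not be read.
	 */
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}
#endif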
/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve individual keys into target specified
 * by flow_dissector from either the skbuff or a raw buffer specified by the
 * rest parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_keyid *key_keyid;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
	}
	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5)
			goto out_bad;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (!dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_IPV4_ADDRS))
			break;

		key_addrs = skb_flow_dissector_target(flow_dissector,
			      FLOW_DISSECTOR_KEY_IPV4_ADDRS, target_container);
		memcpy(&key_addrs->v4addrs, &iph->saddr,
		       sizeof(key_addrs->v4addrs));
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				goto out_good;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
					goto out_good;
			}
		}

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			goto out_bad;

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs;

			key_ipv6_addrs = skb_flow_dissector_target(flow_dissector,
								   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
								   target_container);

			memcpy(key_ipv6_addrs, &iph->saddr, sizeof(*key_ipv6_addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
				goto out_good;
		}

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
		if (!vlan)
			goto out_bad;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_VLANID)) {
			key_tags = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_VLANID,
							     target_container);

			key_tags->vlan_id = skb_vlan_tag_get_id(skb);
		}

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			goto out_bad;
		}
	}
	case htons(ETH_P_TIPC): {
		struct {
			__be32 pre[3];
			__be32 srcnode;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
							      target_container);
			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
		}
		goto out_good;
	}
	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC): {
		struct mpls_label *hdr, _hdr[2];
mpls:
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
					   hlen, &_hdr);
		if (!hdr)
			goto out_bad;

		if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
		     MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
				key_keyid = skb_flow_dissector_target(flow_dissector,
								      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
								      target_container);
				key_keyid->keyid = hdr[1].entry &
					htonl(MPLS_LS_LABEL_MASK);
			}

			goto out_good;
		}

		goto out_good;
	}
	case htons(ETH_P_FCOE):
		key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
		/* fall through */
	default:
		goto out_bad;
	}

ip_proto_again:
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (hdr->flags & (GRE_VERSION | GRE_ROUTING))
			break;

		proto = hdr->proto;
		nhoff += 4;
		if (hdr->flags & GRE_CSUM)
			nhoff += 4;
		if (hdr->flags & GRE_KEY) {
			const __be32 *keyid;
			__be32 _keyid;

			keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid),
						     data, hlen, &_keyid);
			if (!keyid)
				goto out_bad;

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
				key_keyid = skb_flow_dissector_target(flow_dissector,
								      FLOW_DISSECTOR_KEY_GRE_KEYID,
								      target_container);
				key_keyid->keyid = *keyid;
			}
			nhoff += 4;
		}
		if (hdr->flags & GRE_SEQ)
			nhoff += 4;
		if (proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, nhoff,
						   sizeof(_eth),
						   data, hlen, &_eth);
			if (!eth)
				goto out_bad;
			proto = eth->h_proto;
			nhoff += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				hlen = nhoff;
		}

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;

		goto again;
	}
	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr)
			goto out_bad;

		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		goto ip_proto_again;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);
		if (!fh)
			goto out_bad;

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
				ip_proto = fh->nexthdr;
				goto ip_proto_again;
			}
		}
		goto out_good;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;

		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;

		goto ipv6;
	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		goto mpls;
	default:
		break;
	}
	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
	    !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
							data, hlen);
	}

out_good:
	ret = true;

out:
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

	return ret;

out_bad:
	ret = false;
	goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
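
/* Illustrative sketch (not part of the original file, kept under "#if 0"):
 * dissecting an skb into a zeroed struct flow_keys with the generic
 * flow_keys_dissector declared later in this file, much like the
 * skb_flow_dissect_flow_keys() inline helper does.  The function name is
 * hypothetical.
 */
#if 0
static bool example_dissect(const struct sk_buff *skb, struct flow_keys *keys)
{
	/* The caller must zero the target container. */
	memset(keys, 0, sizeof(*keys));

	/* data == NULL: take protocol, offsets and length from the skb. */
	return __skb_flow_dissect(skb, &flow_keys_dissector, keys,
				  NULL, 0, 0, 0, 0);
}
#endif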
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}
static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
					     u32 keyval)
{
	return jhash2(words, length, keyval);
}
static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
{
	const void *p = flow;

	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
}
static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
		     sizeof(*flow) - sizeof(flow->addrs));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		diff -= sizeof(flow->addrs.tipcaddrs);
		break;
	}
	return (sizeof(*flow) - diff) / sizeof(u32);
}
__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		return flow->addrs.tipcaddrs.srcnode;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);
__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);
static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}
static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = __flow_hash_words(flow_keys_hash_start(keys),
				 flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}
u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}
struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};

void make_flow_keys_digest(struct flow_keys_digest *digest,
			   const struct flow_keys *flow)
{
	struct _flow_keys_digest_data *data =
	    (struct _flow_keys_digest_data *)digest;

	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));

	memset(digest, 0, sizeof(*digest));

	data->n_proto = flow->basic.n_proto;
	data->ip_proto = flow->basic.ip_proto;
	data->ports = flow->ports.ports;
	data->src = flow->addrs.v4addrs.src;
	data->dst = flow->addrs.v4addrs.dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);
static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;

u32 __skb_get_hash_symmetric(struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	memset(&keys, 0, sizeof(keys));
	__skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
			   NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(&keys, hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
/**
 * __skb_get_hash: calculate a flow hash
 * @skb: sk_buff to calculate flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses
 * and src/dst port numbers. Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash. Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	__skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd),
			  flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
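
/* Illustrative sketch (not part of the original file, kept under "#if 0"):
 * most callers go through the skb_get_hash() inline helper, which only falls
 * back to the software dissection in __skb_get_hash() when the skb does not
 * already carry a valid (e.g. hardware-provided) hash.  The function name and
 * the queue-selection use case are hypothetical.
 */
#if 0
static u32 example_pick_queue(struct sk_buff *skb, u32 num_queues)
{
	/* num_queues must be non-zero; the hash is non-zero on success. */
	return skb_get_hash(skb) % num_queues;
}
#endif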
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);
__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));

	memcpy(&keys.addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys.addrs.v6addrs.src));
	memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys.addrs.v6addrs.dst));
	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys.ports.src = fl6->fl6_sport;
	keys.ports.dst = fl6->fl6_dport;
	keys.keyid.keyid = fl6->fl6_gre_key;
	keys.tags.flow_label = (__force u32)fl6->flowlabel;
	keys.basic.ip_proto = fl6->flowi6_proto;

	__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
			  flow_keys_have_l4(&keys));

	return skb->hash;
}
EXPORT_SYMBOL(__skb_get_hash_flowi6);
__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));

	keys.addrs.v4addrs.src = fl4->saddr;
	keys.addrs.v4addrs.dst = fl4->daddr;
	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	keys.ports.src = fl4->fl4_sport;
	keys.ports.dst = fl4->fl4_dport;
	keys.keyid.keyid = fl4->fl4_gre_key;
	keys.basic.ip_proto = fl4->flowi4_proto;

	__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
			  flow_keys_have_l4(&keys));

	return skb->hash;
}
EXPORT_SYMBOL(__skb_get_hash_flowi4);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * The function will get the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to the user
 * space and can analyze headers only, instead.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}
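
/* Illustrative sketch (not part of the original file, kept under "#if 0"):
 * using skb_get_poff() to cap a snapshot length to the dissected headers,
 * in the spirit of the BPF use case described above.  The function name is
 * hypothetical.
 */
#if 0
static unsigned int example_header_snaplen(const struct sk_buff *skb,
					   unsigned int snaplen)
{
	u32 poff = skb_get_poff(skb);

	/* 0 means the flow could not be dissected; fall back to snaplen. */
	return poff ? min_t(unsigned int, poff, snaplen) : snaplen;
}
#endif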
__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys->addrs.v6addrs.src));
	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys->addrs.v6addrs.dst));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->ports.src = fl6->fl6_sport;
	keys->ports.dst = fl6->fl6_dport;
	keys->keyid.keyid = fl6->fl6_gre_key;
	keys->tags.flow_label = (__force u32)fl6->flowlabel;
	keys->basic.ip_proto = fl6->flowi6_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);
__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	keys->addrs.v4addrs.src = fl4->saddr;
	keys->addrs.v4addrs.dst = fl4->daddr;
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	keys->ports.src = fl4->fl4_sport;
	keys->ports.dst = fl4->fl4_dport;
	keys->keyid.keyid = fl4->fl4_gre_key;
	keys->basic.ip_proto = fl4->flowi4_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi4);
static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.tipcaddrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLANID,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};
static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};
static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};
struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_buf_dissector __read_mostly;
static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
				flow_keys_dissector_symmetric_keys,
				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
	skb_flow_dissector_init(&flow_keys_buf_dissector,
				flow_keys_buf_dissector_keys,
				ARRAY_SIZE(flow_keys_buf_dissector_keys));
	return 0;
}

core_initcall(init_default_flow_dissectors);