#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>

static bool dissector_uses_key(const struct flow_dissector *flow_dissector,
			       enum flow_dissector_key_id key_id)
{
	return flow_dissector->used_keys & (1 << key_id);
}

static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}

static void *skb_flow_dissector_target(struct flow_dissector *flow_dissector,
				       enum flow_dissector_key_id key_id,
				       void *target_container)
{
	return ((char *)target_container) + flow_dissector->offset[key_id];
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);

/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);

/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve individual keys into target specified
 * by flow_dissector from either the skbuff or a raw buffer specified by the
 * rest parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_keyid *key_keyid;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
	}

	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5)
			goto out_bad;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (!dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_IPV4_ADDRS))
			break;

		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV4_ADDRS, target_container);
		memcpy(&key_addrs->v4addrs, &iph->saddr,
		       sizeof(key_addrs->v4addrs));
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				goto out_good;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
					goto out_good;
			}
		}

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
		__be32 flow_label;

ipv6:
		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph)
			goto out_bad;

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs;

			key_ipv6_addrs = skb_flow_dissector_target(flow_dissector,
								   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
								   target_container);

			memcpy(key_ipv6_addrs, &iph->saddr, sizeof(*key_ipv6_addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		flow_label = ip6_flowlabel(iph);
		if (flow_label) {
			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
				goto out_good;
		}

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			goto out_good;

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
		if (!vlan)
			goto out_bad;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_VLANID)) {
			key_tags = skb_flow_dissector_target(flow_dissector,
							     FLOW_DISSECTOR_KEY_VLANID,
							     target_container);

			key_tags->vlan_id = skb_vlan_tag_get_id(skb);
		}

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			goto out_bad;
		}
	}
): {
297 hdr
= __skb_header_pointer(skb
, nhoff
, sizeof(_hdr
), data
, hlen
, &_hdr
);
301 if (dissector_uses_key(flow_dissector
,
302 FLOW_DISSECTOR_KEY_TIPC_ADDRS
)) {
303 key_addrs
= skb_flow_dissector_target(flow_dissector
,
304 FLOW_DISSECTOR_KEY_TIPC_ADDRS
,
306 key_addrs
->tipcaddrs
.srcnode
= hdr
->srcnode
;
307 key_control
->addr_type
= FLOW_DISSECTOR_KEY_TIPC_ADDRS
;
	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC): {
		struct mpls_label *hdr, _hdr[2];
mpls:
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
					   hlen, &_hdr);
		if (!hdr)
			goto out_bad;

		if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
		     MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
				key_keyid = skb_flow_dissector_target(flow_dissector,
								      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
								      target_container);
				key_keyid->keyid = hdr[1].entry &
					htonl(MPLS_LS_LABEL_MASK);
			}

			goto out_good;
		}

		goto out_good;
	}
	case htons(ETH_P_FCOE):
		key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
		/* fall through */
	default:
		goto out_bad;
	}

ip_proto_again:
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr)
			goto out_bad;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (hdr->flags & (GRE_VERSION | GRE_ROUTING))
			break;

		proto = hdr->proto;
		nhoff += 4;
		if (hdr->flags & GRE_CSUM)
			nhoff += 4;
		if (hdr->flags & GRE_KEY) {
			const __be32 *keyid;
			__be32 _keyid;

			keyid = __skb_header_pointer(skb, nhoff, sizeof(_keyid),
						     data, hlen, &_keyid);
			if (!keyid)
				goto out_bad;

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
				key_keyid = skb_flow_dissector_target(flow_dissector,
								      FLOW_DISSECTOR_KEY_GRE_KEYID,
								      target_container);
				key_keyid->keyid = *keyid;
			}
			nhoff += 4;
		}
		if (hdr->flags & GRE_SEQ)
			nhoff += 4;
		if (proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, nhoff,
						   sizeof(_eth),
						   data, hlen, &_eth);
			if (!eth)
				goto out_bad;
			proto = eth->h_proto;
			nhoff += sizeof(*eth);
		}

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;

		goto again;
	}
	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr)
			goto out_bad;

		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		goto ip_proto_again;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);
		if (!fh)
			goto out_bad;

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
				ip_proto = fh->nexthdr;
				goto ip_proto_again;
			}
		}
		goto out_good;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;

		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
			goto out_good;

		goto ipv6;
	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		goto mpls;
	default:
		break;
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_PORTS)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
							data, hlen);
	}

out_good:
	ret = true;

out_bad:
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;
	key_control->thoff = (u16)nhoff;

	return ret;
}
EXPORT_SYMBOL(__skb_flow_dissect);
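
/*
 * Usage sketch (illustrative only): most in-kernel callers go through the
 * skb_flow_dissect_flow_keys() wrapper with the default flow_keys_dissector,
 * as ___skb_get_hash() and skb_get_poff() do later in this file:
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect_flow_keys(skb, &keys, 0))
 *		pr_debug("n_proto 0x%04x ip_proto %u thoff %u\n",
 *			 ntohs(keys.basic.n_proto), keys.basic.ip_proto,
 *			 keys.control.thoff);
 */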

static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
					     u32 keyval)
{
	return jhash2(words, length, keyval);
}

static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
{
	const void *p = flow;

	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
}

static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);

	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
		     sizeof(*flow) - sizeof(flow->addrs));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		diff -= sizeof(flow->addrs.tipcaddrs);
		break;
	}
	return (sizeof(*flow) - diff) / sizeof(u32);
}

__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC_ADDRS:
		return flow->addrs.tipcaddrs.srcnode;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);

__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);

static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}

static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = __flow_hash_words(flow_keys_hash_start(keys),
				 flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
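
/*
 * Usage sketch (illustrative only, following the same pattern as
 * __get_hash_from_flowi4() below): a caller can fill a struct flow_keys by
 * hand and hash it.  The 4-tuple values here are made up.
 *
 *	struct flow_keys keys;
 *	u32 hash;
 *
 *	memset(&keys, 0, sizeof(keys));
 *	keys.addrs.v4addrs.src = htonl(0xc0a80001);	192.168.0.1
 *	keys.addrs.v4addrs.dst = htonl(0xc0a80002);	192.168.0.2
 *	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 *	keys.ports.src = htons(12345);
 *	keys.ports.dst = htons(80);
 *	keys.basic.ip_proto = IPPROTO_TCP;
 *	hash = flow_hash_from_keys(&keys);
 */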

static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}

struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};

void make_flow_keys_digest(struct flow_keys_digest *digest,
			   const struct flow_keys *flow)
{
	struct _flow_keys_digest_data *data =
	    (struct _flow_keys_digest_data *)digest;

	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));

	memset(digest, 0, sizeof(*digest));

	data->n_proto = flow->basic.n_proto;
	data->ip_proto = flow->basic.ip_proto;
	data->ports = flow->ports.ports;
	data->src = flow->addrs.v4addrs.src;
	data->dst = flow->addrs.v4addrs.dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);

/**
 * __skb_get_hash: calculate a flow hash
 * @skb: sk_buff to calculate flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	__skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd),
			  flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
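
/*
 * Usage sketch (illustrative only): callers normally use the skb_get_hash()
 * wrapper from <linux/skbuff.h>, which only falls back to __skb_get_hash()
 * when no valid hash is already cached in the skb, e.g. to spread flows over
 * a hypothetical number of queues:
 *
 *	u32 hash = skb_get_hash(skb);
 *	unsigned int queue = hash % num_queues;	(num_queues is made up here)
 */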

__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);

__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));

	memcpy(&keys.addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys.addrs.v6addrs.src));
	memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys.addrs.v6addrs.dst));
	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys.ports.src = fl6->fl6_sport;
	keys.ports.dst = fl6->fl6_dport;
	keys.keyid.keyid = fl6->fl6_gre_key;
	keys.tags.flow_label = (__force u32)fl6->flowlabel;
	keys.basic.ip_proto = fl6->flowi6_proto;

	__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
			  flow_keys_have_l4(&keys));

	return skb->hash;
}
EXPORT_SYMBOL(__skb_get_hash_flowi6);

__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));

	keys.addrs.v4addrs.src = fl4->saddr;
	keys.addrs.v4addrs.dst = fl4->daddr;
	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	keys.ports.src = fl4->fl4_sport;
	keys.ports.dst = fl4->fl4_dport;
	keys.keyid.keyid = fl4->fl4_gre_key;
	keys.basic.ip_proto = fl4->flowi4_proto;

	__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
			  flow_keys_have_l4(&keys));

	return skb->hash;
}
EXPORT_SYMBOL(__skb_get_hash_flowi4);

u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}

/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * The function will get the offset to the payload as far as it could
 * be dissected.  The main user is currently BPF, so that we can
 * dynamically truncate packets without needing to push actual payload
 * to the user space and can analyze headers only, instead.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}
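
/*
 * Usage sketch (illustrative only): as the comment above notes, the main user
 * is BPF, where the payload-offset ancillary load resolves to this helper.  A
 * hypothetical caller that only wants to look at protocol headers could do:
 *
 *	u32 hdr_len = skb_get_poff(skb);
 *
 *	if (hdr_len)
 *		hdr_len = min_t(u32, hdr_len, skb->len);
 */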

__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys->addrs.v6addrs.src));
	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys->addrs.v6addrs.dst));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->ports.src = fl6->fl6_sport;
	keys->ports.dst = fl6->fl6_dport;
	keys->keyid.keyid = fl6->fl6_gre_key;
	keys->tags.flow_label = (__force u32)fl6->flowlabel;
	keys->basic.ip_proto = fl6->flowi6_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);

__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	keys->addrs.v4addrs.src = fl4->saddr;
	keys->addrs.v4addrs.dst = fl4->daddr;
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	keys->ports.src = fl4->fl4_sport;
	keys->ports.dst = fl4->fl4_dport;
	keys->keyid.keyid = fl4->fl4_gre_key;
	keys->basic.ip_proto = fl4->flowi4_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi4);

static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.tipcaddrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLANID,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};

struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_buf_dissector __read_mostly;

static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_buf_dissector,
				flow_keys_buf_dissector_keys,
				ARRAY_SIZE(flow_keys_buf_dissector_keys));
	return 0;
}

late_initcall_sync(init_default_flow_dissectors);