#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/gre.h>
#include <net/pptp.h>
#include <net/tipc.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
#include <linux/tcp.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
#include <uapi/linux/batadv_packet.h>
#include <linux/bpf.h>
static DEFINE_MUTEX(flow_dissector_mutex);
static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}
void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* User should make sure that every key target offset is within
		 * boundaries of unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes control and basic key.
	 * That way we are able to avoid handling lack of these in fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
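/* Usage sketch (illustrative, not part of this file): a caller declares a
 * key list whose offsets point into its own target container and then
 * initializes a private dissector, mirroring what
 * init_default_flow_dissectors() does for flow_keys_dissector at the end of
 * this file. "my_keys"/"my_dissector" are hypothetical names; note that
 * CONTROL and BASIC must be present or the BUG_ON()s above fire.
 *
 *	static const struct flow_dissector_key my_keys[] = {
 *		{
 *			.key_id = FLOW_DISSECTOR_KEY_CONTROL,
 *			.offset = offsetof(struct flow_keys, control),
 *		},
 *		{
 *			.key_id = FLOW_DISSECTOR_KEY_BASIC,
 *			.offset = offsetof(struct flow_keys, basic),
 *		},
 *	};
 *	static struct flow_dissector my_dissector;
 *
 *	skb_flow_dissector_init(&my_dissector, my_keys, ARRAY_SIZE(my_keys));
 */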
int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	struct bpf_prog *attached;
	struct net *net;

	net = current->nsproxy->net_ns;
	mutex_lock(&flow_dissector_mutex);
	attached = rcu_dereference_protected(net->flow_dissector_prog,
					     lockdep_is_held(&flow_dissector_mutex));
	if (attached) {
		/* Only one BPF program can be attached at a time */
		mutex_unlock(&flow_dissector_mutex);
		return -EEXIST;
	}
	rcu_assign_pointer(net->flow_dissector_prog, prog);
	mutex_unlock(&flow_dissector_mutex);
	return 0;
}
int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
{
	struct bpf_prog *attached;
	struct net *net;

	net = current->nsproxy->net_ns;
	mutex_lock(&flow_dissector_mutex);
	attached = rcu_dereference_protected(net->flow_dissector_prog,
					     lockdep_is_held(&flow_dissector_mutex));
	if (!attached) {
		mutex_unlock(&flow_dissector_mutex);
		return -ENOENT;
	}
	bpf_prog_put(attached);
	RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
	mutex_unlock(&flow_dissector_mutex);
	return 0;
}
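/* Userspace side sketch (illustrative assumption, not kernel code): with
 * libbpf, a loaded BPF_PROG_TYPE_FLOW_DISSECTOR program is attached to the
 * caller's network namespace roughly like this; a second attach fails with
 * -EEXIST per the check above, and detach drops the single attached program.
 *
 *	int prog_fd = ...;	// fd of a loaded flow dissector program
 *
 *	if (bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0))
 *		handle_error();
 *	...
 *	bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
 */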
/**
 * skb_flow_get_be16 - extract be16 entity
 * @skb: sk_buff to extract from
 * @poff: offset to extract at
 * @data: raw buffer pointer to the packet
 * @hlen: packet header length
 *
 * The function will try to retrieve a be16 entity at
 * offset poff
 */
static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
				void *data, int hlen)
{
	__be16 *u, _u;

	u = __skb_header_pointer(skb, poff, sizeof(_u), data, hlen, &_u);
	if (u)
		return *u;

	return 0;
}
/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);
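/* Example (illustrative sketch): for a linear TCP skb whose transport header
 * offset is known, both 16-bit ports come back packed in one __be32 whose
 * layout matches struct flow_dissector_key_ports:
 *
 *	struct flow_dissector_key_ports tp;
 *
 *	tp.ports = __skb_flow_get_ports(skb, skb_transport_offset(skb),
 *					IPPROTO_TCP, NULL, 0);
 *	// tp.src and tp.dst now hold the network-order source/dest ports
 */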
static void
skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
				   struct flow_dissector *flow_dissector,
				   void *target_container)
{
	struct flow_dissector_key_control *ctrl;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
		return;

	ctrl = skb_flow_dissector_target(flow_dissector,
					 FLOW_DISSECTOR_KEY_ENC_CONTROL,
					 target_container);
	ctrl->addr_type = type;
}
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container)
{
	struct ip_tunnel_info *info;
	struct ip_tunnel_key *key;

	/* A quick check to see if there might be something to do. */
	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_KEYID) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IP) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_OPTS))
		return;

	info = skb_tunnel_info(skb);
	if (!info)
		return;

	key = &info->key;

	switch (ip_tunnel_info_af(info)) {
	case AF_INET:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
			struct flow_dissector_key_ipv4_addrs *ipv4;

			ipv4 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
							 target_container);
			ipv4->src = key->u.ipv4.src;
			ipv4->dst = key->u.ipv4.dst;
		}
		break;
	case AF_INET6:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
			struct flow_dissector_key_ipv6_addrs *ipv6;

			ipv6 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
							 target_container);
			ipv6->src = key->u.ipv6.src;
			ipv6->dst = key->u.ipv6.dst;
		}
		break;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *keyid;

		keyid = skb_flow_dissector_target(flow_dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  target_container);
		keyid->keyid = tunnel_id_to_key32(key->tun_id);
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *tp;

		tp = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_ENC_PORTS,
					       target_container);
		tp->src = key->tp_src;
		tp->dst = key->tp_dst;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_dissector_key_ip *ip;

		ip = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_ENC_IP,
					       target_container);
		ip->tos = key->tos;
		ip->ttl = key->ttl;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_dissector_key_enc_opts *enc_opt;

		enc_opt = skb_flow_dissector_target(flow_dissector,
						    FLOW_DISSECTOR_KEY_ENC_OPTS,
						    target_container);

		if (info->options_len) {
			enc_opt->len = info->options_len;
			ip_tunnel_info_opts_get(enc_opt->data, info);
			enc_opt->dst_opt_type = info->key.tun_flags &
						TUNNEL_OPTIONS_PRESENT;
		}
	}
}
EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
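/* Example (illustrative sketch): a classifier that wants the outer tunnel
 * addresses would include FLOW_DISSECTOR_KEY_ENC_CONTROL and
 * FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS in its key list and then call:
 *
 *	skb_flow_dissect_tunnel_info(skb, &dissector, &keys);
 *
 * where "dissector" and "keys" are the caller's own dissector and target
 * container (hypothetical names). The helper is a no-op unless the skb
 * carries tunnel metadata, i.e. skb_tunnel_info() returns non-NULL.
 */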
static enum flow_dissect_ret
__skb_flow_dissect_mpls(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct mpls_label *hdr, _hdr[2];
	u32 entry, label;

	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
		return FLOW_DISSECT_RET_OUT_GOOD;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
				   hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	entry = ntohl(hdr[0].entry);
	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *key_mpls;

		key_mpls = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_MPLS,
						     target_container);
		key_mpls->mpls_label = label;
		key_mpls->mpls_ttl = (entry & MPLS_LS_TTL_MASK)
					>> MPLS_LS_TTL_SHIFT;
		key_mpls->mpls_tc = (entry & MPLS_LS_TC_MASK)
					>> MPLS_LS_TC_SHIFT;
		key_mpls->mpls_bos = (entry & MPLS_LS_S_MASK)
					>> MPLS_LS_S_SHIFT;
	}

	if (label == MPLS_LABEL_ENTROPY) {
		key_keyid = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
						      target_container);
		key_keyid->keyid = hdr[1].entry & htonl(MPLS_LS_LABEL_MASK);
	}
	return FLOW_DISSECT_RET_OUT_GOOD;
}
static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}
static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for version 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	if (gre_ver == 0)
		*p_proto = hdr->protocol;
	else
		/* Version1 must be PPTP, and check the flags */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;

	offset += sizeof(struct gre_base_hdr);

	if (hdr->flags & GRE_CSUM)
		offset += FIELD_SIZEOF(struct gre_full_hdr, csum) +
			  FIELD_SIZEOF(struct gre_full_hdr, reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += FIELD_SIZEOF(struct gre_full_hdr, key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += FIELD_SIZEOF(struct pptp_gre_header, seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += FIELD_SIZEOF(struct pptp_gre_header, ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			return FLOW_DISSECT_RET_OUT_BAD;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}
/**
 * __skb_flow_dissect_batadv() - dissect batman-adv header
 * @skb: sk_buff with the batman-adv header
 * @key_control: flow dissectors control key
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @p_proto: pointer used to update the protocol to process next
 * @p_nhoff: pointer used to update inner network header offset
 * @hlen: packet header length
 * @flags: any combination of FLOW_DISSECTOR_F_*
 *
 * An attempt is made to dissect ETH_P_BATMAN packets. Only
 * &struct batadv_unicast packets are actually processed because they contain an
 * inner ethernet header and are usually followed by the actual network header.
 * This allows the flow dissector to continue processing the packet.
 *
 * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
 * FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
 * otherwise FLOW_DISSECT_RET_OUT_BAD
 */
static enum flow_dissect_ret
__skb_flow_dissect_batadv(const struct sk_buff *skb,
			  struct flow_dissector_key_control *key_control,
			  void *data, __be16 *p_proto, int *p_nhoff, int hlen,
			  unsigned int flags)
{
	struct {
		struct batadv_unicast_packet batadv_unicast;
		struct ethhdr eth;
	} *hdr, _hdr;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
				   &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
		return FLOW_DISSECT_RET_OUT_BAD;

	*p_proto = hdr->eth.h_proto;
	*p_nhoff += sizeof(*hdr);

	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}
static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, void *data, int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}
static void
__skb_flow_dissect_ipv4(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, const struct iphdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = iph->tos;
	key_ip->ttl = iph->ttl;
}

static void
__skb_flow_dissect_ipv6(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, void *data, const struct ipv6hdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = ipv6_get_dsfield(iph);
	key_ip->ttl = iph->hop_limit;
}
/* Maximum number of protocol headers that can be parsed in
 * __skb_flow_dissect
 */
#define MAX_FLOW_DISSECT_HDRS	15

static bool skb_flow_dissect_allowed(int *num_hdrs)
{
	++*num_hdrs;

	return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
}
static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
				     struct flow_dissector *flow_dissector,
				     void *target_container)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;

	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);
	key_control->thoff = flow_keys->thoff;
	if (flow_keys->is_frag)
		key_control->flags |= FLOW_DIS_IS_FRAGMENT;
	if (flow_keys->is_first_frag)
		key_control->flags |= FLOW_DIS_FIRST_FRAG;
	if (flow_keys->is_encap)
		key_control->flags |= FLOW_DIS_ENCAPSULATION;

	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);
	key_basic->n_proto = flow_keys->n_proto;
	key_basic->ip_proto = flow_keys->ip_proto;

	if (flow_keys->addr_proto == ETH_P_IP &&
	    dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						      target_container);
		key_addrs->v4addrs.src = flow_keys->ipv4_src;
		key_addrs->v4addrs.dst = flow_keys->ipv4_dst;
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	} else if (flow_keys->addr_proto == ETH_P_IPV6 &&
		   dissector_uses_key(flow_dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						      target_container);
		memcpy(&key_addrs->v6addrs, &flow_keys->ipv6_src,
		       sizeof(key_addrs->v6addrs));
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->src = flow_keys->sport;
		key_ports->dst = flow_keys->dport;
	}
}
bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
			    const struct sk_buff *skb,
			    struct flow_dissector *flow_dissector,
			    struct bpf_flow_keys *flow_keys)
{
	struct bpf_skb_data_end cb_saved;
	struct bpf_skb_data_end *cb;
	u32 result;

	/* Note that even though the const qualifier is discarded
	 * throughout the execution of the BPF program, all changes (to the
	 * control block) are reverted after the BPF program returns.
	 * Therefore, __skb_flow_dissect does not alter the skb.
	 */

	cb = (struct bpf_skb_data_end *)skb->cb;

	/* Save Control Block */
	memcpy(&cb_saved, cb, sizeof(cb_saved));
	memset(cb, 0, sizeof(*cb));

	/* Pass parameters to the BPF program */
	memset(flow_keys, 0, sizeof(*flow_keys));
	cb->qdisc_cb.flow_keys = flow_keys;
	flow_keys->nhoff = skb_network_offset(skb);
	flow_keys->thoff = flow_keys->nhoff;

	bpf_compute_data_pointers((struct sk_buff *)skb);
	result = BPF_PROG_RUN(prog, skb);

	/* Restore state */
	memcpy(cb, &cb_saved, sizeof(cb_saved));

	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len);
	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
				   flow_keys->nhoff, skb->len);

	return result == BPF_OK;
}
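/* BPF side sketch (illustrative; compiled separately as a
 * BPF_PROG_TYPE_FLOW_DISSECTOR program, not kernel code): the program reads
 * the packet starting at flow_keys->nhoff, fills in whatever bpf_flow_keys
 * fields it can, and returns BPF_OK; the clamping above then keeps nhoff and
 * thoff inside the packet regardless of what the program wrote.
 * "my_dissector" and the IPv4-only parsing are hypothetical.
 *
 *	SEC("flow_dissector")
 *	int my_dissector(struct __sk_buff *skb)
 *	{
 *		struct bpf_flow_keys *keys = skb->flow_keys;
 *
 *		// parse skb data starting at keys->nhoff here ...
 *		keys->n_proto = skb->protocol;
 *		keys->thoff = keys->nhoff + sizeof(struct iphdr);
 *		keys->ip_proto = IPPROTO_TCP;
 *		return BPF_OK;
 *	}
 */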
/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 * @flags: any combination of FLOW_DISSECTOR_F_*
 *
 * The function will try to retrieve individual keys into the target specified
 * by @flow_dissector from either the skbuff or a raw buffer specified by the
 * remaining parameters.
 *
 * Caller must take care of zeroing target container memory.
 */
bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_ports *key_ports;
	struct flow_dissector_key_icmp *key_icmp;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	enum flow_dissect_ret fdret;
	enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
	int num_hdrs = 0;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			 skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
#if IS_ENABLED(CONFIG_NET_DSA)
		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) {
			const struct dsa_device_ops *ops;
			int offset;

			ops = skb->dev->dsa_ptr->tag_ops;
			if (ops->flow_dissect &&
			    !ops->flow_dissect(skb, &proto, &offset)) {
				hlen -= offset;
				nhoff += offset;
			}
		}
#endif
	}

	/* It is ensured by skb_flow_dissector_init() that control key will
	 * be always present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that basic key will
	 * be always present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (skb) {
		struct bpf_flow_keys flow_keys;
		struct bpf_prog *attached = NULL;

		rcu_read_lock();

		if (skb->dev)
			attached = rcu_dereference(dev_net(skb->dev)->flow_dissector_prog);
		else if (skb->sk)
			attached = rcu_dereference(sock_net(skb->sk)->flow_dissector_prog);
		else
			WARN_ON_ONCE(1);

		if (attached) {
			ret = __skb_flow_bpf_dissect(attached, skb,
						     flow_dissector,
						     &flow_keys);
			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
						 target_container);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			memcpy(&key_addrs->v4addrs, &iph->saddr,
			       sizeof(key_addrs->v4addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags &
				      FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
					fdret = FLOW_DISSECT_RET_OUT_GOOD;
					break;
				}
			}
		}

		__skb_flow_dissect_ipv4(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs, &iph->saddr,
			       sizeof(key_addrs->v6addrs));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			}
		}

		__skb_flow_dissect_ipv6(skb, flow_dissector,
					target_container, data, iph);

		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
			fdret = FLOW_DISSECT_RET_OUT_GOOD;

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan = NULL;
		struct vlan_hdr _vlan;
		__be16 saved_vlan_tpid = proto;

		if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
		    skb && skb_vlan_tag_present(skb)) {
			proto = skb->protocol;
		} else {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan) {
				fdret = FLOW_DISSECT_RET_OUT_BAD;
				break;
			}

			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
		}

		if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
			dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
		} else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
			dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
		} else {
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		}

		if (dissector_uses_key(flow_dissector, dissector_vlan)) {
			key_vlan = skb_flow_dissector_target(flow_dissector,
							     dissector_vlan,
							     target_container);

			if (!vlan) {
				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
				key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
			} else {
				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
					VLAN_VID_MASK;
				key_vlan->vlan_priority =
					(ntohs(vlan->h_vlan_TCI) &
					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
			}
			key_vlan->vlan_tpid = saved_vlan_tpid;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += PPPOE_SES_HLEN;
		switch (hdr->proto) {
		case htons(PPP_IP):
			proto = htons(ETH_P_IP);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		case htons(PPP_IPV6):
			proto = htons(ETH_P_IPV6);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		default:
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}
		break;
	}
	case htons(ETH_P_TIPC): {
		struct tipc_basic_hdr *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
					   data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC,
							      target_container);
			key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
		}
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
		fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
						target_container, data,
						nhoff, hlen);
		break;
	case htons(ETH_P_FCOE):
		if ((hlen - nhoff) < FCOE_HEADER_LEN) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += FCOE_HEADER_LEN;
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;

	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		fdret = __skb_flow_dissect_arp(skb, flow_dissector,
					       target_container, data,
					       nhoff, hlen);
		break;

	case htons(ETH_P_BATMAN):
		fdret = __skb_flow_dissect_batadv(skb, key_control, data,
						  &proto, &nhoff, hlen, flags);
		break;

	default:
		fdret = FLOW_DISSECT_RET_OUT_BAD;
		break;
	}

	/* Process result of proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_OUT_GOOD:
		goto out_good;
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		goto out_good;
	case FLOW_DISSECT_RET_CONTINUE:
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

ip_proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (ip_proto) {
	case IPPROTO_GRE:
		fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
					       target_container, data,
					       &proto, &nhoff, &hlen, flags);
		break;

	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
		break;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);

		if (!fh) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);
		ip_proto = fh->nexthdr;

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
				fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
				break;
			}
		}

		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_TCP:
		__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
				       data, nhoff, hlen);
		break;

	default:
		break;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
	    !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
		key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
							data, hlen);
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ICMP)) {
		key_icmp = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_ICMP,
						     target_container);
		key_icmp->icmp = skb_flow_get_be16(skb, nhoff, data, hlen);
	}

	/* Process result of IP proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		break;
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto ip_proto_again;
		break;
	case FLOW_DISSECT_RET_OUT_GOOD:
	case FLOW_DISSECT_RET_CONTINUE:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

out_good:
	ret = true;

out:
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

	return ret;

out_bad:
	ret = false;
	goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
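/* Usage sketch (illustrative): most in-kernel callers go through the
 * skb_flow_dissect_flow_keys() wrapper rather than calling this directly;
 * the wrapper zeroes the target container and uses the default
 * flow_keys_dissector defined at the end of this file:
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect_flow_keys(skb, &keys, 0))
 *		pr_debug("ip_proto %u\n", keys.basic.ip_proto);
 */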
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
					     u32 keyval)
{
	return jhash2(words, length, keyval);
}
static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
{
	const void *p = flow;

	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
}

static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
	BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
		     sizeof(*flow) - sizeof(flow->addrs));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC:
		diff -= sizeof(flow->addrs.tipckey);
		break;
	}
	return (sizeof(*flow) - diff) / sizeof(u32);
}
__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC:
		return flow->addrs.tipckey.key;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);

__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);
static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if ((addr_diff < 0) ||
		    (addr_diff == 0 &&
		     ((__force u16)keys->ports.dst <
		      (__force u16)keys->ports.src))) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}
static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = __flow_hash_words(flow_keys_hash_start(keys),
				 flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}
u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
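/* Example (illustrative sketch): ECMP-style users can build a struct
 * flow_keys by hand and ask for a hash; thanks to
 * __flow_hash_consistentify() both directions of a flow yield the same
 * value. "saddr"/"daddr" are hypothetical __be32 locals.
 *
 *	struct flow_keys keys;
 *	u32 hash;
 *
 *	memset(&keys, 0, sizeof(keys));
 *	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 *	keys.addrs.v4addrs.src = saddr;
 *	keys.addrs.v4addrs.dst = daddr;
 *	keys.basic.ip_proto = IPPROTO_UDP;
 *	hash = flow_hash_from_keys(&keys);
 */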
static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys, u32 keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}

struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};
void make_flow_keys_digest(struct flow_keys_digest *digest,
			   const struct flow_keys *flow)
{
	struct _flow_keys_digest_data *data =
	    (struct _flow_keys_digest_data *)digest;

	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));

	memset(digest, 0, sizeof(*digest));

	data->n_proto = flow->basic.n_proto;
	data->ip_proto = flow->basic.ip_proto;
	data->ports = flow->ports.ports;
	data->src = flow->addrs.v4addrs.src;
	data->dst = flow->addrs.v4addrs.dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);
static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;

u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	memset(&keys, 0, sizeof(keys));
	__skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
			   NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(&keys, hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
/**
 * __skb_get_hash: calculate a flow hash
 * @skb: sk_buff to calculate flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	__flow_hash_secret_init();

	hash = ___skb_get_hash(skb, &keys, hashrnd);

	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
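/* Usage note (illustrative): callers normally go through skb_get_hash(),
 * which returns a cached skb->hash when one is already set and only falls
 * back to this function otherwise:
 *
 *	u32 hash = skb_get_hash(skb);	// may invoke __skb_get_hash(skb)
 */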
__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys_basic *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	/* skip L4 headers for fragments after the first */
	if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
	    !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
		return poff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * The function will get the offset to the payload as far as it could
 * be dissected.  The main user is currently BPF, so that we can
 * dynamically truncate packets without needing to push actual payload
 * to the user space and can analyze headers only, instead.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (!skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}
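/* Example (illustrative sketch): a capture path could use the payload
 * offset to copy headers only, truncating the payload; "copy_len" is a
 * hypothetical local. As noted above, the classic BPF payload-offset
 * extension is the main in-tree user.
 *
 *	u32 poff = skb_get_poff(skb);
 *	u32 copy_len = poff ? min(poff, skb->len) : skb->len;
 */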
__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
	       sizeof(keys->addrs.v6addrs.src));
	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
	       sizeof(keys->addrs.v6addrs.dst));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->ports.src = fl6->fl6_sport;
	keys->ports.dst = fl6->fl6_dport;
	keys->keyid.keyid = fl6->fl6_gre_key;
	keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
	keys->basic.ip_proto = fl6->flowi6_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);
static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};

static const struct flow_dissector_key flow_keys_basic_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};

struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_basic_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_basic_dissector);

static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
				flow_keys_dissector_symmetric_keys,
				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
	skb_flow_dissector_init(&flow_keys_basic_dissector,
				flow_keys_basic_dissector_keys,
				ARRAY_SIZE(flow_keys_basic_dissector_keys));
	return 0;
}

core_initcall(init_default_flow_dissectors);