// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>
#include <net/nsh.h>
#include <net/pkt_cls.h>
#include <net/netfilter/nf_conntrack_zones.h>

#include "conntrack.h"
#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport.h"

u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec64 cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts64(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)(u32)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
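
/* Worked example (illustrative, assuming HZ = 1000): if the monotonic clock
 * reads 1,000,000 ms and the flow was last used 5,000 jiffies (= 5,000 ms)
 * ago, then cur_ms = 1,000,000 and idle_ms = 5,000, so the function reports
 * 995,000: the monotonic timestamp, in milliseconds, of the flow's last use.
 */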

#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
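
/* TCP_FLAGS_BE16() keeps the low 12 bits of the 16-bit word holding the TCP
 * data offset, reserved bits and flags, i.e. everything except the
 * data-offset nibble, still in network byte order.  Illustrative sketch:
 *
 *	struct tcphdr *tcp = tcp_hdr(skb);
 *	__be16 flags = TCP_FLAGS_BE16(tcp);	e.g. SYN|ACK -> htons(0x0012)
 *
 * This covers the nine flag bits (including NS) plus the reserved bits.
 */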

void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
{
	struct sw_flow_stats *stats;
	unsigned int cpu = smp_processor_id();
	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	stats = rcu_dereference(flow->stats[cpu]);

	/* Check if already have CPU-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
			flow->stats_last_writer = cpu;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current CPU is the only writer on the
		 * pre-allocated stats keep using them.
		 */
		if (unlikely(flow->stats_last_writer != cpu)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If CPU-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != -1) &&
			    likely(!rcu_access_pointer(flow->stats[cpu]))) {
				/* Try to allocate CPU-specific stats. */
				struct sw_flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_NOWAIT |
							      __GFP_THISNODE |
							      __GFP_NOWARN |
							      __GFP_NOMEMALLOC,
							      numa_node_id());
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[cpu],
							   new_stats);
					cpumask_set_cpu(cpu,
							flow->cpu_used_mask);
					goto unlock;
				}
			}
			flow->stats_last_writer = cpu;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}
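
/* Design note: readers of flow->stats[] use RCU only, so the update fast
 * path takes a single per-CPU spinlock with no cross-CPU contention.
 * CPU-specific entries are allocated lazily, and only once a second CPU is
 * seen writing to the pre-allocated (CPU 0) entry, which is what
 * stats_last_writer tracks.
 */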

/* Must be called with rcu_read_lock or ovs_mutex. */
void ovs_flow_stats_get(const struct sw_flow *flow,
			struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int cpu;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);

		if (stats) {
			/* Local CPU may write on non-local stats, so we must
			 * block bottom-halves here.
			 */
			spin_lock_bh(&stats->lock);
			if (!*used || time_after(stats->used, *used))
				*used = stats->used;
			*tcp_flags |= stats->tcp_flags;
			ovs_stats->n_packets += stats->packet_count;
			ovs_stats->n_bytes += stats->byte_count;
			spin_unlock_bh(&stats->lock);
		}
	}
}

/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu;

	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);

		if (stats) {
			spin_lock_bh(&stats->lock);
			stats->used = 0;
			stats->packet_count = 0;
			stats->byte_count = 0;
			stats->tcp_flags = 0;
			spin_unlock_bh(&stats->lock);
		}
	}
}

static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}
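
/* check_header() distinguishes a frame that is genuinely shorter than the
 * requested header (-EINVAL) from a failed pull of otherwise-present bytes
 * (-ENOMEM).  Callers rely on this: key_extract_l3l4() downgrades -EINVAL
 * to "header absent" with the affected key fields zeroed, while -ENOMEM is
 * propagated as a hard error.
 */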

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}

/**
 * get_ipv6_ext_hdrs() - Parses packet and sets IPv6 extension header flags.
 *
 * @skb: buffer where extension header data starts in packet
 * @nh: ipv6 header
 * @ext_hdrs: flags are stored here
 *
 * OFPIEH12_UNREP is set if more than one of a given IPv6 extension header
 * is unexpectedly encountered. (Two destination options headers may be
 * expected and would not cause this bit to be set.)
 *
 * OFPIEH12_UNSEQ is set if IPv6 extension headers were not in the order
 * preferred (but not required) by RFC 2460:
 *
 * When more than one extension header is used in the same packet, it is
 * recommended that those headers appear in the following order:
 *      IPv6 header
 *      Hop-by-Hop Options header
 *      Destination Options header
 *      Routing header
 *      Fragment header
 *      Authentication header
 *      Encapsulating Security Payload header
 *      Destination Options header
 *      upper-layer header
 */
static void get_ipv6_ext_hdrs(struct sk_buff *skb, struct ipv6hdr *nh,
			      u16 *ext_hdrs)
{
	u8 next_type = nh->nexthdr;
	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	int dest_options_header_count = 0;

	*ext_hdrs = 0;

	while (ipv6_ext_hdr(next_type)) {
		struct ipv6_opt_hdr _hdr, *hp;

		switch (next_type) {
		case IPPROTO_NONE:
			*ext_hdrs |= OFPIEH12_NONEXT;
			/* stop parsing */
			return;

		case IPPROTO_ESP:
			if (*ext_hdrs & OFPIEH12_ESP)
				*ext_hdrs |= OFPIEH12_UNREP;
			if ((*ext_hdrs & ~(OFPIEH12_HOP | OFPIEH12_DEST |
					   OFPIEH12_ROUTER | IPPROTO_FRAGMENT |
					   OFPIEH12_AUTH | OFPIEH12_UNREP)) ||
			    dest_options_header_count >= 2) {
				*ext_hdrs |= OFPIEH12_UNSEQ;
			}
			*ext_hdrs |= OFPIEH12_ESP;
			break;

		case IPPROTO_AH:
			if (*ext_hdrs & OFPIEH12_AUTH)
				*ext_hdrs |= OFPIEH12_UNREP;
			if ((*ext_hdrs &
			     ~(OFPIEH12_HOP | OFPIEH12_DEST | OFPIEH12_ROUTER |
			       IPPROTO_FRAGMENT | OFPIEH12_UNREP)) ||
			    dest_options_header_count >= 2) {
				*ext_hdrs |= OFPIEH12_UNSEQ;
			}
			*ext_hdrs |= OFPIEH12_AUTH;
			break;

		case IPPROTO_DSTOPTS:
			if (dest_options_header_count == 0) {
				if (*ext_hdrs &
				    ~(OFPIEH12_HOP | OFPIEH12_UNREP))
					*ext_hdrs |= OFPIEH12_UNSEQ;
				*ext_hdrs |= OFPIEH12_DEST;
			} else if (dest_options_header_count == 1) {
				if (*ext_hdrs &
				    ~(OFPIEH12_HOP | OFPIEH12_DEST |
				      OFPIEH12_ROUTER | OFPIEH12_FRAG |
				      OFPIEH12_AUTH | OFPIEH12_ESP |
				      OFPIEH12_UNREP)) {
					*ext_hdrs |= OFPIEH12_UNSEQ;
				}
			} else {
				*ext_hdrs |= OFPIEH12_UNREP;
			}
			dest_options_header_count++;
			break;

		case IPPROTO_FRAGMENT:
			if (*ext_hdrs & OFPIEH12_FRAG)
				*ext_hdrs |= OFPIEH12_UNREP;
			if ((*ext_hdrs & ~(OFPIEH12_HOP |
					   OFPIEH12_DEST |
					   OFPIEH12_ROUTER |
					   OFPIEH12_UNREP)) ||
			    dest_options_header_count >= 2) {
				*ext_hdrs |= OFPIEH12_UNSEQ;
			}
			*ext_hdrs |= OFPIEH12_FRAG;
			break;

		case IPPROTO_ROUTING:
			if (*ext_hdrs & OFPIEH12_ROUTER)
				*ext_hdrs |= OFPIEH12_UNREP;
			if ((*ext_hdrs & ~(OFPIEH12_HOP |
					   OFPIEH12_DEST |
					   OFPIEH12_UNREP)) ||
			    dest_options_header_count >= 2) {
				*ext_hdrs |= OFPIEH12_UNSEQ;
			}
			*ext_hdrs |= OFPIEH12_ROUTER;
			break;

		case IPPROTO_HOPOPTS:
			if (*ext_hdrs & OFPIEH12_HOP)
				*ext_hdrs |= OFPIEH12_UNREP;
			/* OFPIEH12_HOP is set to 1 if a hop-by-hop IPv6
			 * extension header is present as the first
			 * extension header in the packet.
			 */
			if (*ext_hdrs == 0)
				*ext_hdrs |= OFPIEH12_HOP;
			else
				*ext_hdrs |= OFPIEH12_UNSEQ;
			break;

		default:
			return;
		}

		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
		if (!hp)
			break;
		next_type = hp->nexthdr;
		start += ipv6_optlen(hp);
	}
}
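
/* Illustrative outcomes of the ordering rules above: Hop-by-Hop followed by
 * Fragment yields (OFPIEH12_HOP | OFPIEH12_FRAG) with no OFPIEH12_UNSEQ,
 * whereas Fragment followed by Hop-by-Hop leaves OFPIEH12_HOP clear and
 * sets OFPIEH12_UNSEQ instead, because hop-by-hop options only count as
 * in-order when they are the first extension header.
 */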

static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned short frag_off;
	unsigned int payload_ofs = 0;
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	struct ipv6hdr *nh;
	int err, nexthdr, flags = 0;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	get_ipv6_ext_hdrs(skb, nh, &key->ipv6.exthdrs);

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (flags & IP6_FH_F_FRAG) {
		if (frag_off) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			key->ip.proto = NEXTHDR_FRAGMENT;
			return 0;
		}
		key->ip.frag = OVS_FRAG_TYPE_FIRST;
	} else {
		key->ip.frag = OVS_FRAG_TYPE_NONE;
	}

	/* Delayed handling of error in ipv6_find_hdr() as it
	 * always sets flags and frag_off to a valid value which may be
	 * used to set key->ip.frag above.
	 */
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}
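
/* On success parse_ipv6hdr() returns the combined length of the IPv6 header
 * and any extension headers; key_extract_l3l4() later hands that length to
 * parse_icmpv6().  For non-first fragments only OVS_FRAG_TYPE_LATER and
 * NEXTHDR_FRAGMENT are reported, as the upper-layer header is not present
 * in such fragments.
 */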

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}

/**
 * parse_vlan_tag - Parse vlan tag from vlan header.
 * @skb: skb containing frame to parse
 * @key_vh: pointer to parsed vlan tag
 * @untag_vlan: should the vlan header be removed from the frame
 *
 * Return: %-ENOMEM (or another negative errno) on memory error.
 * %0 if it encounters a non-vlan or incomplete packet.
 * %1 after successfully parsing vlan tag.
 */
static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
			  bool untag_vlan)
{
	struct vlan_head *vh = (struct vlan_head *)skb->data;

	if (likely(!eth_type_vlan(vh->tpid)))
		return 0;

	if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
				    sizeof(__be16))))
		return -ENOMEM;

	vh = (struct vlan_head *)skb->data;
	key_vh->tci = vh->tci | htons(VLAN_CFI_MASK);
	key_vh->tpid = vh->tpid;

	if (unlikely(untag_vlan)) {
		int offset = skb->data - skb_mac_header(skb);
		u16 tci;
		int err;

		__skb_push(skb, offset);
		err = __skb_vlan_pop(skb, &tci);
		__skb_pull(skb, offset);
		if (err)
			return err;
		__vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
	} else {
		__skb_pull(skb, sizeof(struct vlan_head));
	}
	return 1;
}

static void clear_vlan(struct sw_flow_key *key)
{
	key->eth.vlan.tci = 0;
	key->eth.vlan.tpid = 0;
	key->eth.cvlan.tci = 0;
	key->eth.cvlan.tpid = 0;
}

static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	if (skb_vlan_tag_present(skb)) {
		key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK);
		key->eth.vlan.tpid = skb->vlan_proto;
	} else {
		/* Parse outer vlan tag in the non-accelerated case. */
		res = parse_vlan_tag(skb, &key->eth.vlan, true);
		if (res <= 0)
			return res;
	}

	/* Parse inner vlan tag. */
	res = parse_vlan_tag(skb, &key->eth.cvlan, false);
	if (res <= 0)
		return res;

	return 0;
}
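
/* Double-tagged (QinQ) example: for an 802.1ad frame the outer tag lands in
 * key->eth.vlan and the inner tag in key->eth.cvlan.  The outer tag is
 * moved into the skb's hardware-accelerated tag slot by parse_vlan_tag(),
 * and the inner tag is pulled from the data before the ethertype is parsed.
 */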

static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (eth_proto_is_802_3(proto))
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (eth_proto_is_802_3(llc->ethertype))
		return llc->ethertype;

	return htons(ETH_P_802_2);
}
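
/* Framing note: eth_proto_is_802_3() is true for values of at least 0x0600,
 * i.e. a genuine ethertype (Ethernet II framing).  Smaller values are an
 * 802.3 length field, so the payload is treated as 802.2 LLC: a well-formed
 * SNAP header (DSAP/SSAP 0xAA, zero OUI) yields the encapsulated ethertype,
 * anything else is reported as ETH_P_802_2.
 */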

static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return -EINVAL;
}
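
/* The neighbour discovery parsing above is deliberately strict: a
 * duplicated source or target link-layer address option invalidates the
 * whole ND portion of the key (target and both addresses are cleared) and
 * the packet is rejected with -EINVAL rather than matched on partial data.
 */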

static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct nshhdr *nh;
	unsigned int nh_ofs = skb_network_offset(skb);
	u8 version, length;
	int err;

	err = check_header(skb, nh_ofs + NSH_BASE_HDR_LEN);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	version = nsh_get_ver(nh);
	length = nsh_hdr_len(nh);

	if (version != 0)
		return -EINVAL;

	err = check_header(skb, nh_ofs + length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	key->nsh.base.flags = nsh_get_flags(nh);
	key->nsh.base.ttl = nsh_get_ttl(nh);
	key->nsh.base.mdtype = nh->mdtype;
	key->nsh.base.np = nh->np;
	key->nsh.base.path_hdr = nh->path_hdr;
	switch (key->nsh.base.mdtype) {
	case NSH_M_TYPE1:
		if (length != NSH_M_TYPE1_LEN)
			return -EINVAL;
		memcpy(key->nsh.context, nh->md1.context,
		       sizeof(nh->md1));
		break;
	case NSH_M_TYPE2:
		memset(key->nsh.context, 0,
		       sizeof(nh->md1));
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * key_extract_l3l4 - extracts L3/L4 header information.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 *       L3 header
 * @key: output flow key
 *
 * Return: %0 if successful, otherwise a negative errno value.
 */
static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			memset(&key->tp, 0, sizeof(key->tp));
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order.
				 */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		u8 label_count = 1;

		memset(&key->mpls, 0, sizeof(key->mpls));
		skb_set_inner_network_header(skb, skb->mac_len);
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len +
					     label_count * MPLS_HLEN);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);

			if (label_count <= MPLS_LABEL_DEPTH)
				memcpy(&key->mpls.lse[label_count - 1], &lse,
				       MPLS_HLEN);

			skb_set_inner_network_header(skb, skb->mac_len +
						     label_count * MPLS_HLEN);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			label_count++;
		}
		if (label_count > MPLS_LABEL_DEPTH)
			label_count = MPLS_LABEL_DEPTH;

		key->mpls.num_labels_mask = GENMASK(label_count - 1, 0);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			switch (nh_len) {
			case -EINVAL:
				memset(&key->ip, 0, sizeof(key->ip));
				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
				fallthrough;
			case -EPROTO:
				skb->transport_header = skb->network_header;
				error = 0;
				break;
			default:
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
			memset(&key->tp, 0, sizeof(key->tp));
			return 0;
		}
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	} else if (key->eth.type == htons(ETH_P_NSH)) {
		error = parse_nsh(skb, key);
		if (error)
			return error;
	}
	return 0;
}
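
/* Error-handling convention: for truncated or malformed L3/L4 headers the
 * corresponding key fields are generally zeroed and 0 is returned, so the
 * flow can still be matched as "header not present"; a negative error is
 * reserved for cases where the skb itself cannot be processed.
 */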

/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Initializes @skb header fields as follows:
 *
 *    - skb->mac_header: the L2 header.
 *
 *    - skb->network_header: just past the L2 header, or just past the
 *      VLAN header, to the first byte of the L2 payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 *
 *    - skb->protocol: the type of the data starting at skb->network_header.
 *      Equals to key->eth.type.
 *
 * Return: %0 if successful, otherwise a negative errno value.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer. */
	clear_vlan(key);
	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
		if (unlikely(eth_type_vlan(skb->protocol)))
			return -EINVAL;

		skb_reset_network_header(skb);
		key->eth.type = skb->protocol;
	} else {
		eth = eth_hdr(skb);
		ether_addr_copy(key->eth.src, eth->h_source);
		ether_addr_copy(key->eth.dst, eth->h_dest);

		__skb_pull(skb, 2 * ETH_ALEN);
		/* We are going to push all headers that we pull, so no need to
		 * update skb->csum here.
		 */

		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

		key->eth.type = parse_ethertype(skb);
		if (unlikely(key->eth.type == htons(0)))
			return -ENOMEM;

		/* Multiple tagged packets need to retain TPID to satisfy
		 * skb_vlan_pop(), which will later shift the ethertype into
		 * skb->protocol.
		 */
		if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
			skb->protocol = key->eth.cvlan.tpid;
		else
			skb->protocol = key->eth.type;

		skb_reset_network_header(skb);
		__skb_push(skb, skb->data - skb_mac_header(skb));
	}

	skb_reset_mac_len(skb);

	/* Fill out L3/L4 key info, if any */
	return key_extract_l3l4(skb, key);
}

/* Conntrack fragment handling expects packets with only the L3 header in
 * place, so provide a helper that skips L2 extraction.
 */
int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
	return key_extract_l3l4(skb, key);
}

int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	res = key_extract(skb, key);
	if (!res)
		key->mac_proto &= ~SW_FLOW_KEY_INVALID;

	return res;
}

static int key_extract_mac_proto(struct sk_buff *skb)
{
	switch (skb->dev->type) {
	case ARPHRD_ETHER:
		return MAC_PROTO_ETHERNET;
	case ARPHRD_NONE:
		if (skb->protocol == htons(ETH_P_TEB))
			return MAC_PROTO_ETHERNET;
		return MAC_PROTO_NONE;
	default:
		return -EINVAL;
	}
}
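
/* Device-type note: ARPHRD_NONE ports (e.g. layer-3 tunnel devices) carry
 * no Ethernet header, unless the payload is transparent Ethernet bridging
 * (ETH_P_TEB), in which case the frame is still parsed as Ethernet.
 */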

int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
			 struct sk_buff *skb, struct sw_flow_key *key)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *tc_ext;
#endif
	bool post_ct = false, post_ct_snat = false, post_ct_dnat = false;
	int res, err;
	u16 zone = 0;

	/* Extract metadata from packet. */
	if (tun_info) {
		key->tun_proto = ip_tunnel_info_af(tun_info);
		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));

		if (tun_info->options_len) {
			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
						   8)) - 1
					> sizeof(key->tun_opts));

			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
						tun_info);
			key->tun_opts_len = tun_info->options_len;
		} else {
			key->tun_opts_len = 0;
		}
	} else {
		key->tun_proto = 0;
		key->tun_opts_len = 0;
		memset(&key->tun_key, 0, sizeof(key->tun_key));
	}

	key->phy.priority = skb->priority;
	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
	key->phy.skb_mark = skb->mark;
	key->ovs_flow_hash = 0;
	res = key_extract_mac_proto(skb);
	if (res < 0)
		return res;
	key->mac_proto = res;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	if (tc_skb_ext_tc_enabled()) {
		tc_ext = skb_ext_find(skb, TC_SKB_EXT);
		key->recirc_id = tc_ext && !tc_ext->act_miss ?
				 tc_ext->chain : 0;
		OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
		post_ct = tc_ext ? tc_ext->post_ct : false;
		post_ct_snat = post_ct ? tc_ext->post_ct_snat : false;
		post_ct_dnat = post_ct ? tc_ext->post_ct_dnat : false;
		zone = post_ct ? tc_ext->zone : 0;
	} else {
		key->recirc_id = 0;
	}
#else
	key->recirc_id = 0;
#endif

	err = key_extract(skb, key);
	if (!err) {
		ovs_ct_fill_key(skb, key, post_ct);	/* Must be after key_extract(). */
		if (post_ct) {
			if (!skb_get_nfct(skb)) {
				key->ct_zone = zone;
			} else {
				if (!post_ct_dnat)
					key->ct_state &= ~OVS_CS_F_DST_NAT;
				if (!post_ct_snat)
					key->ct_state &= ~OVS_CS_F_SRC_NAT;
			}
		}
	}

	return err;
}
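
/* When a packet reaches the datapath after traversing tc (e.g. act_ct), the
 * tc_skb_ext describes what conntrack work was already done.  The
 * post_ct_snat/post_ct_dnat bits are used to clear NAT flags that
 * ovs_ct_fill_key() derived from the connection but that were not actually
 * applied to this packet before it entered OVS, keeping the key consistent
 * across the tc/OVS boundary.
 */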

int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
				   struct sk_buff *skb,
				   struct sw_flow_key *key, bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	u64 attrs = 0;
	int err;

	err = parse_flow_nlattrs(attr, a, &attrs, log);
	if (err)
		return -EINVAL;

	/* Extract metadata from netlink attributes. */
	err = ovs_nla_get_flow_metadata(net, a, attrs, key, log);
	if (err)
		return err;

	/* key_extract assumes that skb->protocol is set-up for
	 * layer 3 packets which is the case for other callers,
	 * in particular packets received from the network stack.
	 * Here the correct value can be set from the metadata
	 * extracted above.
	 * For an L2 packet the key's eth type would be zero; skb->protocol
	 * is then set to the correct value later during key extraction.
	 */
	skb->protocol = key->eth.type;
	err = key_extract(skb, key);
	if (err)
		return err;

	/* Check that we have conntrack original direction tuple metadata only
	 * for packets for which it makes sense.  Otherwise the key may be
	 * corrupted due to overlapping key fields.
	 */
	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4) &&
	    key->eth.type != htons(ETH_P_IP))
		return -EINVAL;
	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6) &&
	    (key->eth.type != htons(ETH_P_IPV6) ||
	     sw_flow_key_is_nd(key)))
		return -EINVAL;

	return 0;
}