/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>

#include "conntrack.h"
#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport.h"
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
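
/* Example (illustrative only, not part of the original file): if the wall
 * clock currently reads 1,000,000 ms and the flow was last used 250 jiffies
 * ago on a HZ=1000 kernel, jiffies_to_msecs() yields 250 ms of idle time and
 * the function reports 1,000,000 - 250 = 999,750, i.e. the absolute wall
 * clock time of last use expressed in milliseconds.
 */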
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
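
/* Illustrative note, not part of the original file: tcp_flag_word() covers
 * the 32-bit TCP header word holding the data offset, reserved bits and flag
 * bits.  Reading its first 16 bits and masking with htons(0x0FFF) keeps only
 * the 12 low (reserved + flag) bits, so the data-offset nibble never leaks
 * into the flow key.  For example:
 *
 *	__be16 flags = TCP_FLAGS_BE16(tcp_hdr(skb));
 *	if (flags & htons(0x0002))	-- the SYN bit is set
 */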
void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
{
	struct flow_stats *stats;
	int node = numa_node_id();
	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	stats = rcu_dereference(flow->stats[node]);

	/* Check if already have node-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (node == 0 && unlikely(flow->stats_last_writer != node))
			flow->stats_last_writer = node;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current NUMA-node is the only writer on the
		 * pre-allocated stats keep using them.
		 */
		if (unlikely(flow->stats_last_writer != node)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If node-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
			    && likely(!rcu_access_pointer(flow->stats[node]))) {
				/* Try to allocate node-specific stats. */
				struct flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_NOWAIT |
							      __GFP_THISNODE |
							      __GFP_NOWARN |
							      __GFP_NOMEMALLOC,
							      node);
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[node],
							   new_stats);
					goto unlock;
				}
			}
			flow->stats_last_writer = node;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}
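
/* Reader's note (not part of the original file): the update path above keeps
 * one struct flow_stats per NUMA node.  The common case only takes the local
 * node's spinlock; the node 0 ("pre-allocated") instance doubles as a shared
 * fallback until a node has shown it writes often enough to deserve its own
 * copy, which is what the stats_last_writer bookkeeping tracks.  A reader
 * therefore has to sum all per-node instances, as ovs_flow_stats_get() below
 * does.
 */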
/* Must be called with rcu_read_lock or ovs_mutex. */
void ovs_flow_stats_get(const struct sw_flow *flow,
			struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int node;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	for_each_node(node) {
		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[node]);

		if (stats) {
			/* Local CPU may write on non-local stats, so we must
			 * block bottom-halves here.
			 */
			spin_lock_bh(&stats->lock);
			if (!*used || time_after(stats->used, *used))
				*used = stats->used;
			*tcp_flags |= stats->tcp_flags;
			ovs_stats->n_packets += stats->packet_count;
			ovs_stats->n_bytes += stats->byte_count;
			spin_unlock_bh(&stats->lock);
		}
	}
}
/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int node;

	for_each_node(node) {
		struct flow_stats *stats = ovsl_dereference(flow->stats[node]);

		if (stats) {
			spin_lock_bh(&stats->lock);
			stats->used = 0;
			stats->packet_count = 0;
			stats->byte_count = 0;
			stats->tcp_flags = 0;
			spin_unlock_bh(&stats->lock);
		}
	}
}
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}
static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}
static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}
static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}
static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}
static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}
static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	struct ipv6hdr *nh;
	int err, payload_ofs;
	__be16 frag_off;
	u8 nexthdr;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	} else {
		key->ip.frag = OVS_FRAG_TYPE_NONE;
	}

	/* Delayed handling of error in ipv6_skip_exthdr() as it
	 * always sets frag_off to a valid value which may be
	 * used to set key->ip.frag above.
	 */
	if (unlikely(payload_ofs < 0))
		return -EPROTO;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}
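
/* Illustrative note, not part of the original file: ipv6_skip_exthdr()
 * returns the fragment header's offset/flags field in frag_off.  The low 3
 * bits hold the M flag and reserved bits, so masking with htons(~0x7) tests
 * whether the 13-bit fragment offset itself is non-zero:
 *
 *	frag_off == 0                  -> not fragmented (OVS_FRAG_TYPE_NONE)
 *	frag_off & htons(~0x7) == 0    -> first fragment (offset 0)
 *	frag_off & htons(~0x7) != 0    -> later fragment (OVS_FRAG_TYPE_LATER)
 */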
static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}
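
/* Illustrative note, not part of the original file: for an in-band 802.1Q
 * tag the 16-bit TCI packs PCP (3 bits), DEI/CFI (1 bit) and the VLAN ID
 * (12 bits).  VLAN_TAG_PRESENT reuses the DEI/CFI bit position so that a
 * zero key->eth.tci can still mean "no VLAN tag at all"; e.g. VLAN 5 with
 * priority 0 is stored as htons(0x1005) after the OR above.
 */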
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (eth_proto_is_802_3(proto))
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (eth_proto_is_802_3(llc->ethertype))
		return llc->ethertype;

	return htons(ETH_P_802_2);
}
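
/* Reader's note (not part of the original file): parse_ethertype() follows
 * the classic Ethernet II vs. 802.3 split.  A type field that is a valid
 * EtherType (>= ETH_P_802_3_MIN) is returned as-is; smaller values are a
 * length, so the frame is 802.3 and the EtherType has to be dug out of an
 * LLC/SNAP header (AA AA 03 00 00 00 <type>).  Anything that is not a
 * well-formed SNAP encapsulation is reported as ETH_P_802_2, and htons(0)
 * is reserved for "pull failed" so the caller can return an error.
 */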
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);
	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				 (struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}
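
/* Illustrative note, not part of the original file: neighbour discovery
 * options are a sequence of (type, length, value) blocks whose length field
 * is counted in units of 8 octets, which is why the loop advances by
 * nd_opt->nd_opt_len * 8.  A typical solicitation carries a single source
 * link-layer address option:
 *
 *	type = ND_OPT_SOURCE_LL_ADDR, length = 1 (8 bytes total),
 *	value = 6-byte Ethernet address
 *
 * which is exactly the opt_len == 8 case accepted above.
 */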
/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	ether_addr_copy(key->eth.src, eth->h_source);
	ether_addr_copy(key->eth.dst, eth->h_dest);

	__skb_pull(skb, 2 * ETH_ALEN);
	/* We are going to push all headers that we pull, so no need to
	 * update skb->csum here.
	 */

	key->eth.tci = 0;
	if (skb_vlan_tag_present(skb))
		key->eth.tci = htons(skb->vlan_tci);
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	skb_reset_mac_len(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}

		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		size_t stack_len = MPLS_HLEN;

		/* In the presence of an MPLS label stack the end of the L2
		 * header and the beginning of the L3 header differ.
		 *
		 * Advance network_header to the beginning of the L3
		 * header. mac_len corresponds to the end of the L2 header.
		 */
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len + stack_len);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_network_header(skb), MPLS_HLEN);

			if (stack_len == MPLS_HLEN)
				memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);

			skb_set_network_header(skb, skb->mac_len + stack_len);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			stack_len += MPLS_HLEN;
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			switch (nh_len) {
			case -EINVAL:
				memset(&key->ip, 0, sizeof(key->ip));
				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
				/* fall-through */
			case -EPROTO:
				skb->transport_header = skb->network_header;
				error = 0;
				break;
			default:
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	}
	return 0;
}
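
/* Reader's note (not part of the original file): key_extract() is the single
 * parser shared by the three entry points below.  ovs_flow_key_extract()
 * fills in the metadata (tunnel, input port, skb mark, conntrack state)
 * before parsing a received packet, ovs_flow_key_extract_userspace() takes
 * the metadata from netlink attributes instead, and ovs_flow_key_update()
 * re-parses a packet after an action has rewritten its headers.
 */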
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
	return key_extract(skb, key);
}
int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
			 struct sk_buff *skb, struct sw_flow_key *key)
{
	/* Extract metadata from packet. */
	if (tun_info) {
		key->tun_proto = ip_tunnel_info_af(tun_info);
		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));

		if (tun_info->options_len) {
			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
						   8)) - 1
					> sizeof(key->tun_opts));

			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
						tun_info);
			key->tun_opts_len = tun_info->options_len;
		} else {
			key->tun_opts_len = 0;
		}
	} else {
		key->tun_proto = 0;
		key->tun_opts_len = 0;
		memset(&key->tun_key, 0, sizeof(key->tun_key));
	}

	key->phy.priority = skb->priority;
	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
	key->phy.skb_mark = skb->mark;
	ovs_ct_fill_key(skb, key);
	key->ovs_flow_hash = 0;
	key->recirc_id = 0;

	return key_extract(skb, key);
}
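
/* Usage sketch, illustrative only and not part of the original file: a
 * hypothetical receive path would extract the key once and use it both for
 * the flow table lookup and for the per-flow stats update, roughly:
 *
 *	struct sw_flow_key key;
 *
 *	if (ovs_flow_key_extract(tun_info, skb, &key))
 *		goto drop;
 *	flow = lookup(flow_table, &key);     -- hypothetical lookup helper
 *	ovs_flow_stats_update(flow, key.tp.flags, skb);
 *
 * The real callers live in vport.c and datapath.c.
 */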
int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
				   struct sk_buff *skb,
				   struct sw_flow_key *key, bool log)
{
	int err;

	memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);

	/* Extract metadata from netlink attributes. */
	err = ovs_nla_get_flow_metadata(net, attr, key, log);
	if (err)
		return err;

	return key_extract(skb, key);
}
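
/* Reader's note (not part of the original file): in the userspace path the
 * packet arrives over genetlink, so there is no input vport or tunnel
 * metadata to read from the skb itself; that metadata is supplied as netlink
 * attributes instead, and only the header fields are still taken from the
 * packet by key_extract().
 */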