/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

int ovs_net_id __read_mostly;
EXPORT_SYMBOL_GPL(ovs_net_id);

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct nla_policy flow_policy[];

static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP,
};

/* Check if we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
			    unsigned int group)
{
	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
	       genl_has_listeners(family, genl_info_net(info), group);
}

static void ovs_notify(struct genl_family *family,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, info, 0, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes to device state (add/remove datapath or port, set operations
 * on vports, etc.) and all writes to other state (flow table modifications,
 * setting miscellaneous datapath parameters, etc.) are protected by
 * ovs_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}
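
/*
 * Illustrative sketch of the two access patterns described in the locking
 * comment above (not part of the datapath; example_update_state() is a
 * hypothetical helper, shown only to make the split concrete):
 *
 *	ovs_lock();			// writer side
 *	example_update_state(dp);	// mutate dp state under ovs_mutex
 *	ovs_unlock();
 *
 *	rcu_read_lock();		// reader side
 *	dp = get_dp_rcu(net, dp_ifindex);
 *	...				// dp stays valid until the unlock
 *	rcu_read_unlock();
 */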

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
			     const struct sw_flow_key *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
				  const struct sw_flow_key *,
				  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock. */
static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
	struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);

	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);

		if (vport)
			return vport->dp;
	}

	return NULL;
}

/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
 * returned dp pointer valid.
 */
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
	rcu_read_lock();
	dp = get_dp_rcu(net, dp_ifindex);
	rcu_read_unlock();

	return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);

	return ovs_vport_name(vport);
}

static int get_dpifindex(const struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = local->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	kfree(dp->ports);
	kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
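
/*
 * Worked example of the bucket math above, assuming DP_VPORT_HASH_BUCKETS
 * is 1024 (the value lives in datapath.h; the mask trick only requires it
 * to be a power of two so that the AND is equivalent to a modulo):
 *
 *	port_no = 1030:  1030 & (1024 - 1) == 6  ->  &dp->ports[6]
 */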

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
	const struct vport *p = OVS_CB(skb)->input_vport;
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	u32 n_mask_hit;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;
		int error;

		memset(&upcall, 0, sizeof(upcall));
		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
		upcall.mru = OVS_CB(skb)->mru;
		error = ovs_dp_upcall(dp, skb, key, &upcall);
		if (unlikely(error))
			kfree_skb(skb);
		else
			consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	ovs_flow_stats_update(flow, key->tp.flags, skb);
	sf_acts = rcu_dereference(flow->sf_acts);
	ovs_execute_actions(dp, skb, sf_acts, key);

	stats_counter = &stats->n_hit;

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->syncp);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	u64_stats_update_end(&stats->syncp);
}
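
/*
 * Worked example for the counters above (numbers hypothetical): if the
 * lookup tried three masks before matching, n_mask_hit grows by 3 and
 * n_hit by 1; had no flow matched, the packet would instead be queued to
 * userspace and n_missed incremented, with n_lost reserved for upcalls
 * that could not be delivered (see ovs_dp_upcall() below).
 */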

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct sw_flow_key *key,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp, skb, key, upcall_info);
	else
		err = queue_gso_packets(dp, skb, key, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->syncp);
	stats->n_lost++;
	u64_stats_update_end(&stats->syncp);

	return err;
}

static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
			     const struct sw_flow_key *key,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	struct ovs_skb_cb ovs_cb;
	int err;

	ovs_cb = *OVS_CB(skb);
	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	*OVS_CB(skb) = ovs_cb;
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	if (gso_type & SKB_GSO_UDP) {
		/* The initial flow key extracted by ovs_flow_key_extract()
		 * in this case is for a first fragment, so we need to
		 * properly mark later fragments.
		 */
		later_key = *key;
		later_key.ip.frag = OVS_FRAG_TYPE_LATER;
	}

	/* Queue all of the segments. */
	skb = segs;
	do {
		*OVS_CB(skb) = ovs_cb;
		if (gso_type & SKB_GSO_UDP && skb != segs)
			key = &later_key;

		err = queue_userspace_packet(dp, skb, key, upcall_info);
		if (err)
			break;

	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}
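
/*
 * Fragment-marking sketch for the UDP GSO case above: the key extracted
 * from the original packet describes a first fragment, so every segment
 * after the first is queued with later_key, an otherwise identical copy
 * whose ip.frag is OVS_FRAG_TYPE_LATER.
 */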

static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
			      unsigned int hdrlen)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */

	/* OVS_PACKET_ATTR_USERDATA */
	if (upcall_info->userdata)
		size += NLA_ALIGN(upcall_info->userdata->nla_len);

	/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
	if (upcall_info->egress_tun_info)
		size += nla_total_size(ovs_tun_key_attr_size());

	/* OVS_PACKET_ATTR_ACTIONS */
	if (upcall_info->actions_len)
		size += nla_total_size(upcall_info->actions_len);

	/* OVS_PACKET_ATTR_MRU */
	if (upcall_info->mru)
		size += nla_total_size(sizeof(upcall_info->mru));

	return size;
}
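
/*
 * Sketch of the resulting message layout (attribute order as emitted by
 * queue_userspace_packet() below; bracketed parts are optional):
 *
 *	ovs_header | KEY | [USERDATA] | [EGRESS_TUN_KEY] | [ACTIONS] |
 *	[MRU] | PACKET (only the header of PACKET is reserved up front;
 *	the payload is appended by skb_zerocopy())
 */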

static void pad_packet(struct datapath *dp, struct sk_buff *skb)
{
	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
		size_t plen = NLA_ALIGN(skb->len) - skb->len;

		if (plen > 0)
			memset(skb_put(skb, plen), 0, plen);
	}
}
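
/*
 * Worked example for the padding above (length hypothetical): with
 * skb->len == 61, NLA_ALIGN(61) == 64, so plen == 3 and three zero bytes
 * are appended; userspace that cannot parse an unaligned tail attribute
 * then sees a properly NLA_ALIGNTO-aligned message.
 */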

static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
				  const struct sw_flow_key *key,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
	struct nlattr *nla;
	struct genl_info info = {
		.dst_sk = ovs_dp_get_net(dp)->genl_sock,
		.snd_portid = upcall_info->portid,
	};
	size_t len;
	unsigned int hlen;
	int err, dp_ifindex;

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex)
		return -ENODEV;

	if (skb_vlan_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_hwaccel_push_inside(nskb);
		if (!nskb)
			return -ENOMEM;

		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Complete checksum if needed */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto out;

	/* Older versions of OVS user space enforce alignment of the last
	 * Netlink attribute to NLA_ALIGNTO which would require extensive
	 * padding logic. Only perform zerocopy if padding is not required.
	 */
	if (dp->user_features & OVS_DP_F_UNALIGNED)
		hlen = skb_zerocopy_headlen(skb);
	else
		hlen = skb->len;

	len = upcall_msg_size(upcall_info, hlen);
	user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
	BUG_ON(err);

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	if (upcall_info->egress_tun_info) {
		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
		err = ovs_nla_put_tunnel_info(user_skb,
					      upcall_info->egress_tun_info);
		BUG_ON(err);
		nla_nest_end(user_skb, nla);
	}

	if (upcall_info->actions_len) {
		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
		err = ovs_nla_put_actions(upcall_info->actions,
					  upcall_info->actions_len,
					  user_skb);
		if (!err)
			nla_nest_end(user_skb, nla);
		else
			nla_nest_cancel(user_skb, nla);
	}

	/* Add OVS_PACKET_ATTR_MRU */
	if (upcall_info->mru) {
		if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
				upcall_info->mru)) {
			err = -ENOBUFS;
			goto out;
		}
		pad_packet(dp, user_skb);
	}

	/* Only reserve room for attribute header, packet data is added
	 * in skb_zerocopy() */
	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
		err = -ENOBUFS;
		goto out;
	}
	nla->nla_len = nla_attr_size(skb->len);

	err = skb_zerocopy(user_skb, skb, skb->len, hlen);
	if (err)
		goto out;

	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
	pad_packet(dp, user_skb);

	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
	user_skb = NULL;
out:
	if (err)
		skb_tx_error(skb);
	kfree_skb(user_skb);
	kfree_skb(nskb);
	return err;
}

static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct datapath *dp;
	struct ethhdr *eth;
	struct vport *input_vport;
	u16 mru = 0;
	int len;
	int err;
	bool log = !a[OVS_PACKET_ATTR_PROBE];

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have.
	 */
	if (eth_proto_is_802_3(eth->h_proto))
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	/* Set packet's mru */
	if (a[OVS_PACKET_ATTR_MRU]) {
		mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
		packet->ignore_df = 1;
	}
	OVS_CB(packet)->mru = mru;

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
					     packet, &flow->key, log);
	if (err)
		goto err_flow_free;

	err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, &acts, log);
	if (err)
		goto err_flow_free;

	rcu_assign_pointer(flow->sf_acts, acts);
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp_rcu(net, ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
	if (!input_vport)
		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

	if (!input_vport)
		goto err_unlock;

	packet->dev = input_vport->dev;
	OVS_CB(packet)->input_vport = input_vport;
	sf_acts = rcu_dereference(flow->sf_acts);

	local_bh_disable();
	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
};

static const struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};

static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_packet_genl_ops,
	.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
};

static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
	}
}
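
/*
 * The fetch/retry pair above is the reader side of the u64_stats
 * seqcount: if a writer on that CPU ran u64_stats_update_begin()/end()
 * while *percpu_stats was being copied, the retry call reports a torn
 * read and the copy is redone. A minimal reader sketch:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&s->syncp);
 *		snapshot = *s;
 *	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
 */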

static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{
	return ovs_identifier_is_ufid(sfid) &&
	       !(ufid_flags & OVS_UFID_F_OMIT_KEY);
}

static bool should_fill_mask(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
}

static bool should_fill_actions(uint32_t ufid_flags)
{
	return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
}
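
/*
 * Example of the helpers above (flag values from <linux/openvswitch.h>):
 * a request with ufid_flags == (OVS_UFID_F_OMIT_KEY |
 * OVS_UFID_F_OMIT_ACTIONS) on a UFID-identified flow serializes only the
 * UFID, the mask and the stats attributes, which keeps flow dumps small.
 */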

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
				    const struct sw_flow_id *sfid,
				    uint32_t ufid_flags)
{
	size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));

	/* OVS_FLOW_ATTR_UFID */
	if (sfid && ovs_identifier_is_ufid(sfid))
		len += nla_total_size(sfid->ufid_len);

	/* OVS_FLOW_ATTR_KEY */
	if (!sfid || should_fill_key(sfid, ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_MASK */
	if (should_fill_mask(ufid_flags))
		len += nla_total_size(ovs_key_attr_size());

	/* OVS_FLOW_ATTR_ACTIONS */
	if (should_fill_actions(ufid_flags))
		len += nla_total_size(acts->orig_len);

	return len
		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size(8); /* OVS_FLOW_ATTR_USED */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct ovs_flow_stats stats;
	__be16 tcp_flags;
	unsigned long used;

	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		return -EMSGSIZE;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
		return -EMSGSIZE;

	if ((u8)ntohs(tcp_flags) &&
	    nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
		return -EMSGSIZE;

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
				     struct sk_buff *skb, int skb_orig_len)
{
	struct nlattr *start;
	int err;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'. This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them. (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);

		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				return err;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len) {
		return -EMSGSIZE;
	}

	return 0;
}
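
/*
 * Worked reading of the skip logic above: for the first flow in the dump
 * skb (skb_orig_len == 0) an action list that does not fit is simply
 * omitted, so even a flow whose actions exceed NLMSG_GOODSIZE remains
 * dumpable; for any later flow the error is returned instead, and that
 * flow is retried at the start of the next dump skb.
 */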

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{
	const int skb_orig_len = skb->len;
	struct ovs_header *ovs_header;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = dp_ifindex;

	err = ovs_nla_put_identifier(flow, skb);
	if (err)
		goto error;

	if (should_fill_key(&flow->id, ufid_flags)) {
		err = ovs_nla_put_masked_key(flow, skb);
		if (err)
			goto error;
	}

	if (should_fill_mask(ufid_flags)) {
		err = ovs_nla_put_mask(flow, skb);
		if (err)
			goto error;
	}

	err = ovs_flow_cmd_fill_stats(flow, skb);
	if (err)
		goto error;

	if (should_fill_actions(ufid_flags)) {
		err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
		if (err)
			goto error;
	}

	genlmsg_end(skb, ovs_header);
	return 0;

error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
					       const struct sw_flow_id *sfid,
					       struct genl_info *info,
					       bool always,
					       uint32_t ufid_flags)
{
	struct sk_buff *skb;
	size_t len;

	if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
		return NULL;

	len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
	skb = genlmsg_new_unicast(len, info, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
					       int dp_ifindex,
					       struct genl_info *info, u8 cmd,
					       bool always, u32 ufid_flags)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
				      &flow->id, info, always, ufid_flags);
	if (IS_ERR_OR_NULL(skb))
		return skb;

	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
					info->snd_portid, info->snd_seq, 0,
					cmd, ufid_flags);
	BUG_ON(retval < 0);
	return skb;
}
*skb
, struct genl_info
*info
)
912 struct net
*net
= sock_net(skb
->sk
);
913 struct nlattr
**a
= info
->attrs
;
914 struct ovs_header
*ovs_header
= info
->userhdr
;
915 struct sw_flow
*flow
= NULL
, *new_flow
;
916 struct sw_flow_mask mask
;
917 struct sk_buff
*reply
;
919 struct sw_flow_key key
;
920 struct sw_flow_actions
*acts
;
921 struct sw_flow_match match
;
922 u32 ufid_flags
= ovs_nla_get_ufid_flags(a
[OVS_FLOW_ATTR_UFID_FLAGS
]);
924 bool log
= !a
[OVS_FLOW_ATTR_PROBE
];
926 /* Must have key and actions. */
928 if (!a
[OVS_FLOW_ATTR_KEY
]) {
929 OVS_NLERR(log
, "Flow key attr not present in new flow.");
932 if (!a
[OVS_FLOW_ATTR_ACTIONS
]) {
933 OVS_NLERR(log
, "Flow actions attr not present in new flow.");
937 /* Most of the time we need to allocate a new flow, do it before
940 new_flow
= ovs_flow_alloc();
941 if (IS_ERR(new_flow
)) {
942 error
= PTR_ERR(new_flow
);
947 ovs_match_init(&match
, &key
, &mask
);
948 error
= ovs_nla_get_match(net
, &match
, a
[OVS_FLOW_ATTR_KEY
],
949 a
[OVS_FLOW_ATTR_MASK
], log
);
953 ovs_flow_mask_key(&new_flow
->key
, &key
, true, &mask
);
955 /* Extract flow identifier. */
956 error
= ovs_nla_get_identifier(&new_flow
->id
, a
[OVS_FLOW_ATTR_UFID
],
961 /* Validate actions. */
962 error
= ovs_nla_copy_actions(net
, a
[OVS_FLOW_ATTR_ACTIONS
],
963 &new_flow
->key
, &acts
, log
);
965 OVS_NLERR(log
, "Flow actions may not be safe on all matching packets.");
969 reply
= ovs_flow_cmd_alloc_info(acts
, &new_flow
->id
, info
, false,
972 error
= PTR_ERR(reply
);
977 dp
= get_dp(net
, ovs_header
->dp_ifindex
);
983 /* Check if this is a duplicate flow */
984 if (ovs_identifier_is_ufid(&new_flow
->id
))
985 flow
= ovs_flow_tbl_lookup_ufid(&dp
->table
, &new_flow
->id
);
987 flow
= ovs_flow_tbl_lookup(&dp
->table
, &key
);
989 rcu_assign_pointer(new_flow
->sf_acts
, acts
);
991 /* Put flow in bucket. */
992 error
= ovs_flow_tbl_insert(&dp
->table
, new_flow
, &mask
);
993 if (unlikely(error
)) {
998 if (unlikely(reply
)) {
999 error
= ovs_flow_cmd_fill_info(new_flow
,
1000 ovs_header
->dp_ifindex
,
1001 reply
, info
->snd_portid
,
1009 struct sw_flow_actions
*old_acts
;
1011 /* Bail out if we're not allowed to modify an existing flow.
1012 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1013 * because Generic Netlink treats the latter as a dump
1014 * request. We also accept NLM_F_EXCL in case that bug ever
1017 if (unlikely(info
->nlhdr
->nlmsg_flags
& (NLM_F_CREATE
1020 goto err_unlock_ovs
;
1022 /* The flow identifier has to be the same for flow updates.
1023 * Look for any overlapping flow.
1025 if (unlikely(!ovs_flow_cmp(flow
, &match
))) {
1026 if (ovs_identifier_is_key(&flow
->id
))
1027 flow
= ovs_flow_tbl_lookup_exact(&dp
->table
,
1029 else /* UFID matches but key is different */
1033 goto err_unlock_ovs
;
1036 /* Update actions. */
1037 old_acts
= ovsl_dereference(flow
->sf_acts
);
1038 rcu_assign_pointer(flow
->sf_acts
, acts
);
1040 if (unlikely(reply
)) {
1041 error
= ovs_flow_cmd_fill_info(flow
,
1042 ovs_header
->dp_ifindex
,
1043 reply
, info
->snd_portid
,
1051 ovs_nla_free_flow_actions_rcu(old_acts
);
1052 ovs_flow_free(new_flow
, false);
1056 ovs_notify(&dp_flow_genl_family
, reply
, info
);
1063 ovs_nla_free_flow_actions(acts
);
1065 ovs_flow_free(new_flow
, false);

/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
static struct sw_flow_actions *get_flow_actions(struct net *net,
						const struct nlattr *a,
						const struct sw_flow_key *key,
						const struct sw_flow_mask *mask,
						bool log)
{
	struct sw_flow_actions *acts;
	struct sw_flow_key masked_key;
	int error;

	ovs_flow_mask_key(&masked_key, key, true, mask);
	error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
	if (error) {
		OVS_NLERR(log,
			  "Actions may not be safe on all matching packets");
		return ERR_PTR(error);
	}

	return acts;
}

static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply = NULL;
	struct datapath *dp;
	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
	struct sw_flow_match match;
	struct sw_flow_id sfid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int error;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	/* Extract key. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR(log, "Flow key attribute not present in set flow.");
		goto error;
	}

	ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
	ovs_match_init(&match, &key, &mask);
	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
				  a[OVS_FLOW_ATTR_MASK], log);
	if (error)
		goto error;

	/* Validate actions. */
	if (a[OVS_FLOW_ATTR_ACTIONS]) {
		acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &key,
					&mask, log);
		if (IS_ERR(acts)) {
			error = PTR_ERR(acts);
			goto error;
		}

		/* Can allocate before locking if have acts. */
		reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
						ufid_flags);
		if (IS_ERR(reply)) {
			error = PTR_ERR(reply);
			goto err_kfree_acts;
		}
	}

	ovs_lock();
	dp = get_dp(net, ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}
	/* Check that the flow exists. */
	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		error = -ENOENT;
		goto err_unlock_ovs;
	}

	/* Update actions, if present. */
	if (acts) {
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW,
						       ufid_flags);
			BUG_ON(error < 0);
		}
	} else {
		/* Could not alloc without acts before locking. */
		reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
						info, OVS_FLOW_CMD_NEW, false,
						ufid_flags);

		if (IS_ERR(reply)) {
			error = PTR_ERR(reply);
			goto err_unlock_ovs;
		}
	}

	/* Clear stats. */
	if (a[OVS_FLOW_ATTR_CLEAR])
		ovs_flow_stats_clear(flow);
	ovs_unlock();

	if (reply)
		ovs_notify(&dp_flow_genl_family, reply, info);
	if (old_acts)
		ovs_nla_free_flow_actions_rcu(old_acts);

	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	ovs_nla_free_flow_actions(acts);
error:
	return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err = 0;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
					log);
	} else if (!ufid_present) {
		OVS_NLERR(log,
			  "Flow get message rejected, Key attribute missing.");
		err = -EINVAL;
	}
	if (err)
		return err;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		err = -ENODEV;
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (!flow) {
		err = -ENOENT;
		goto unlock;
	}

	reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
					OVS_FLOW_CMD_NEW, true, ufid_flags);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);
unlock:
	ovs_unlock();
	return err;
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct net *net = sock_net(skb->sk);
	struct sw_flow_key key;
	struct sk_buff *reply;
	struct sw_flow *flow = NULL;
	struct datapath *dp;
	struct sw_flow_match match;
	struct sw_flow_id ufid;
	u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
	int err;
	bool log = !a[OVS_FLOW_ATTR_PROBE];
	bool ufid_present;

	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
	if (a[OVS_FLOW_ATTR_KEY]) {
		ovs_match_init(&match, &key, NULL);
		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
					NULL, log);
		if (unlikely(err))
			return err;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		err = -ENODEV;
		goto unlock;
	}

	if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
		err = ovs_flow_tbl_flush(&dp->table);
		goto unlock;
	}

	if (ufid_present)
		flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
	else
		flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		err = -ENOENT;
		goto unlock;
	}

	ovs_flow_tbl_remove(&dp->table, flow);
	ovs_unlock();

	reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
					&flow->id, info, false, ufid_flags);
	if (likely(reply)) {
		if (likely(!IS_ERR(reply))) {
			rcu_read_lock();	/* To keep RCU checker happy. */
			err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
						     reply, info->snd_portid,
						     info->snd_seq, 0,
						     OVS_FLOW_CMD_DEL,
						     ufid_flags);
			rcu_read_unlock();
			BUG_ON(err < 0);

			ovs_notify(&dp_flow_genl_family, reply, info);
		} else {
			netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
		}
	}

	ovs_flow_free(flow, true);
	return 0;
unlock:
	ovs_unlock();
	return err;
}

static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *a[__OVS_FLOW_ATTR_MAX];
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct table_instance *ti;
	struct datapath *dp;
	u32 ufid_flags;
	int err;

	err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
			    OVS_FLOW_ATTR_MAX, flow_policy);
	if (err)
		return err;
	ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}

	ti = rcu_dereference(dp->table.ti);
	for (;;) {
		struct sw_flow *flow;
		u32 bucket, obj;

		bucket = cb->args[0];
		obj = cb->args[1];
		flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
		if (!flow)
			break;

		if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   OVS_FLOW_CMD_NEW, ufid_flags) < 0)
			break;

		cb->args[0] = bucket;
		cb->args[1] = obj;
	}
	rcu_read_unlock();
	return skb->len;
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
	[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
	[OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
	[OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
};

static const struct genl_ops dp_flow_genl_ops[] = {
	{ .cmd = OVS_FLOW_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_new
	},
	{ .cmd = OVS_FLOW_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_del
	},
	{ .cmd = OVS_FLOW_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_get,
	  .dumpit = ovs_flow_cmd_dump
	},
	{ .cmd = OVS_FLOW_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = flow_policy,
	  .doit = ovs_flow_cmd_set,
	},
};

static struct genl_family dp_flow_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_FLOW_FAMILY,
	.version = OVS_FLOW_VERSION,
	.maxattr = OVS_FLOW_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_flow_genl_ops,
	.n_ops = ARRAY_SIZE(dp_flow_genl_ops),
	.mcgrps = &ovs_dp_flow_multicast_group,
	.n_mcgrps = 1,
};

static size_t ovs_dp_cmd_msg_size(void)
{
	size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

	msgsize += nla_total_size(IFNAMSIZ);
	msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
	msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */

	return msgsize;
}
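
/*
 * Worked arithmetic for the sizing above: nla_total_size(sizeof(u32)) is
 * NLA_ALIGN(NLA_HDRLEN + 4) == 8 bytes, i.e. every attribute is counted
 * with its own header plus alignment padding on top of the payload.
 */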

/* Called with ovs_mutex. */
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_dp_stats dp_stats;
	struct ovs_dp_megaflow_stats dp_megaflow_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
				 flags, cmd);
	if (!ovs_header)
		goto error;

	ovs_header->dp_ifindex = get_dpifindex(dp);

	err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
	if (err)
		goto nla_put_failure;

	get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
	if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
		    &dp_stats))
		goto nla_put_failure;

	if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
		    sizeof(struct ovs_dp_megaflow_stats),
		    &dp_megaflow_stats))
		goto nla_put_failure;

	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
		goto nla_put_failure;

	genlmsg_end(skb, ovs_header);
	return 0;

nla_put_failure:
	genlmsg_cancel(skb, ovs_header);
error:
	return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
{
	return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
}

/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
					const struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
	struct datapath *dp;

	if (!a[OVS_DP_ATTR_NAME])
		dp = get_dp(net, ovs_header->dp_ifindex);
	else {
		struct vport *vport;

		vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
		dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
	}
	return dp ? dp : ERR_PTR(-ENODEV);
}

static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
{
	struct datapath *dp;

	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp))
		return;

	WARN(dp->user_features, "Dropping previously announced user features\n");
	dp->user_features = 0;
}

static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{
	if (a[OVS_DP_ATTR_USER_FEATURES])
		dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
}

static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	struct ovs_net *ovs_net;
	int err, i;

	err = -EINVAL;
	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
		goto err;

	reply = ovs_dp_cmd_alloc_info(info);
	if (!reply)
		return -ENOMEM;

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_free_reply;

	ovs_dp_set_net(dp, sock_net(skb->sk));

	/* Allocate table. */
	err = ovs_flow_tbl_init(&dp->table);
	if (err)
		goto err_free_dp;

	dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_table;
	}

	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dp->ports) {
		err = -ENOMEM;
		goto err_destroy_percpu;
	}

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&dp->ports[i]);

	/* Set up our datapath device. */
	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
	parms.type = OVS_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = OVSP_LOCAL;
	parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];

	ovs_dp_change(dp, a);

	/* So far only local changes have been made, now need the lock. */
	ovs_lock();

	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		if (err == -EEXIST) {
			/* An outdated user space instance that does not understand
			 * the concept of user_features has attempted to create a new
			 * datapath and is likely to reuse it. Drop all user features.
			 */
			if (info->genlhdr->version < OVS_DP_VER_FEATURES)
				ovs_dp_reset_user_features(skb, info);
		}

		goto err_destroy_ports_array;
	}

	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_NEW);
	BUG_ON(err < 0);

	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);

	ovs_unlock();

	ovs_notify(&dp_datapath_genl_family, reply, info);
	return 0;

err_destroy_ports_array:
	ovs_unlock();
	kfree(dp->ports);
err_destroy_percpu:
	free_percpu(dp->stats_percpu);
err_destroy_table:
	ovs_flow_tbl_destroy(&dp->table);
err_free_dp:
	kfree(dp);
err_free_reply:
	kfree_skb(reply);
err:
	return err;
}

/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
	int i;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
			if (vport->port_no != OVSP_LOCAL)
				ovs_dp_detach_port(vport);
	}

	list_del_rcu(&dp->list_node);

	/* OVSP_LOCAL is the datapath internal port. We need to make sure
	 * that all ports in the datapath are destroyed before freeing the
	 * datapath itself.
	 */
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));

	/* RCU destroy the flow table */
	call_rcu(&dp->rcu, destroy_dp_rcu);
}

static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	reply = ovs_dp_cmd_alloc_info(info);
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto err_unlock_free;

	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_DEL);
	BUG_ON(err < 0);

	__dp_destroy(dp);
	ovs_unlock();

	ovs_notify(&dp_datapath_genl_family, reply, info);

	return 0;

err_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	reply = ovs_dp_cmd_alloc_info(info);
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto err_unlock_free;

	ovs_dp_change(dp, info->attrs);

	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_NEW);
	BUG_ON(err < 0);

	ovs_unlock();

	ovs_notify(&dp_datapath_genl_family, reply, info);

	return 0;

err_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	reply = ovs_dp_cmd_alloc_info(info);
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp)) {
		err = PTR_ERR(dp);
		goto err_unlock_free;
	}
	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
				   info->snd_seq, 0, OVS_DP_CMD_NEW);
	BUG_ON(err < 0);
	ovs_unlock();

	return genlmsg_reply(reply, info);

err_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	ovs_lock();
	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_NEW) < 0)
			break;
		i++;
	}
	ovs_unlock();

	cb->args[0] = i;

	return skb->len;
}

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
};

static const struct genl_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_set,
	},
};

static struct genl_family dp_datapath_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_DATAPATH_FAMILY,
	.version = OVS_DATAPATH_VERSION,
	.maxattr = OVS_DP_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_datapath_genl_ops,
	.n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
	.mcgrps = &ovs_dp_datapath_multicast_group,
	.n_mcgrps = 1,
};

/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME,
			   ovs_vport_name(vport)))
		goto nla_put_failure;

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
		    &vport_stats))
		goto nla_put_failure;

	if (ovs_vport_get_upcall_portids(vport, skb))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	genlmsg_end(skb, ovs_header);
	return 0;

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}
*ovs_vport_cmd_alloc_info(void)
1864 return nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);

/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
					 u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
	BUG_ON(retval < 0);

	return skb;
}

/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  const struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_ovsl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENODEV);
		return vport;
	} else {
		return ERR_PTR(-EINVAL);
	}
}
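
/*
 * Resolution order sketch for lookup_vport(): OVS_VPORT_ATTR_NAME wins
 * when present (a nonzero dp_ifindex must then agree with the vport's
 * datapath), otherwise OVS_VPORT_ATTR_PORT_NO is resolved inside the
 * datapath named by dp_ifindex; with neither attribute the lookup fails
 * with -EINVAL.
 */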

static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	u32 port_no;
	int err;

	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		return -EINVAL;

	port_no = a[OVS_VPORT_ATTR_PORT_NO]
		? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
	if (port_no >= DP_MAX_PORTS)
		return -EFBIG;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
restart:
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock_free;

	if (port_no) {
		vport = ovs_vport_ovsl(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock_free;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock_free;
			}
			vport = ovs_vport_ovsl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport)) {
		if (err == -EAGAIN)
			goto restart;
		goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
	BUG_ON(err < 0);
	ovs_unlock();

	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;

	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
		err = -EINVAL;
		goto exit_unlock_free;
	}

	if (a[OVS_VPORT_ATTR_OPTIONS]) {
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
		if (err)
			goto exit_unlock_free;
	}

	if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
		struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];

		err = ovs_vport_set_upcall_portids(vport, ids);
		if (err)
			goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
	BUG_ON(err < 0);

	ovs_unlock();
	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock_free;
	}

	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_DEL);
	BUG_ON(err < 0);
	ovs_dp_detach_port(vport);
	ovs_unlock();

	ovs_notify(&dp_vport_genl_family, reply, info);
	return 0;

exit_unlock_free:
	ovs_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	reply = ovs_vport_cmd_alloc_info();
	if (!reply)
		return -ENOMEM;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_free;
	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
	BUG_ON(err < 0);
	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock_free:
	rcu_read_unlock();
	kfree_skb(reply);
	return err;
}

static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp) {
		rcu_read_unlock();
		return -ENODEV;
	}
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;

		j = 0;
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_NEW) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static const struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};

struct genl_family dp_vport_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_vport_genl_ops,
	.n_ops = ARRAY_SIZE(dp_vport_genl_ops),
	.mcgrps = &ovs_dp_vport_multicast_group,
	.n_mcgrps = 1,
};

static struct genl_family * const dp_genl_families[] = {
	&dp_datapath_genl_family,
	&dp_vport_genl_family,
	&dp_flow_genl_family,
	&dp_packet_genl_family,
};

static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i]);
}

static int dp_register_genl(void)
{
	int err;
	int i;

	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {

		err = genl_register_family(dp_genl_families[i]);
		if (err)
			goto error;
	}

	return 0;

error:
	dp_unregister_genl(i);
	return err;
}
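
/*
 * Unwind sketch for the loop above: if registering family i fails, the
 * error path unregisters exactly the i families that did register
 * (dp_unregister_genl(i)), so a partial registration never leaks.
 */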

static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
	return 0;
}

static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
					    struct list_head *head)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct datapath *dp;

	list_for_each_entry(dp, &ovs_net->dps, list_node) {
		int i;

		for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
			struct vport *vport;

			hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
				if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
					continue;

				if (dev_net(vport->dev) == dnet)
					list_add(&vport->detach_list, head);
			}
		}
	}
}

static void __net_exit ovs_exit_net(struct net *dnet)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
	struct vport *vport, *vport_next;
	struct net *net;
	LIST_HEAD(head);

	ovs_lock();
	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);

	rtnl_lock();
	for_each_net(net)
		list_vports_from_net(net, dnet, &head);
	rtnl_unlock();

	/* Detach all vports from given namespace. */
	list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
		list_del(&vport->detach_list);
		ovs_dp_detach_port(vport);
	}

	ovs_unlock();

	cancel_work_sync(&ovs_net->dp_notify_work);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id   = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};

static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath\n");

	err = action_fifos_init();
	if (err)
		goto error;

	err = ovs_internal_dev_rtnl_link_register();
	if (err)
		goto error_action_fifos_exit;

	err = ovs_flow_init();
	if (err)
		goto error_unreg_rtnl_link;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = ovs_netdev_init();
	if (err)
		goto error_unreg_notifier;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_netdev;

	return 0;

error_unreg_netdev:
	ovs_netdev_exit();
error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error_unreg_rtnl_link:
	ovs_internal_dev_rtnl_link_unregister();
error_action_fifos_exit:
	action_fifos_exit();
error:
	return err;
}

static void dp_cleanup(void)
{
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	ovs_netdev_exit();
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
	ovs_internal_dev_rtnl_link_unregister();
	action_fifos_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");