// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 Nicira, Inc.
 */

#include <linux/module.h>
#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/static_key.h>
#include <net/ip.h>
#include <net/genetlink.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/ipv6_frag.h>

#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#endif

#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_netlink.h"
struct ovs_ct_len_tbl {
	int maxlen;
	int minlen;
};

/* Metadata mark for masked write to conntrack mark */
struct md_mark {
	u32 value;
	u32 mask;
};

/* Metadata label for masked write to conntrack label. */
struct md_labels {
	struct ovs_key_ct_labels value;
	struct ovs_key_ct_labels mask;
};

enum ovs_ct_nat {
	OVS_CT_NAT = 1 << 0,     /* NAT for committed connections only. */
	OVS_CT_SRC_NAT = 1 << 1, /* Source NAT for NEW connections. */
	OVS_CT_DST_NAT = 1 << 2, /* Destination NAT for NEW connections. */
};
/* Conntrack action context for execution. */
struct ovs_conntrack_info {
	struct nf_conntrack_helper *helper;
	struct nf_conntrack_zone zone;
	struct nf_conn *ct;
	u8 commit : 1;
	u8 nat : 3;                 /* enum ovs_ct_nat */
	u8 force : 1;
	u8 have_eventmask : 1;
	u16 family;
	u32 eventmask;              /* Mask of 1 << IPCT_*. */
	struct md_mark mark;
	struct md_labels labels;
	char timeout[CTNL_TIMEOUT_NAME_MAX];
	struct nf_ct_timeout *nf_ct_timeout;
#if IS_ENABLED(CONFIG_NF_NAT)
	struct nf_nat_range2 range;  /* Only present for SRC NAT and DST NAT. */
#endif
};
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
#define OVS_CT_LIMIT_UNLIMITED	0
#define OVS_CT_LIMIT_DEFAULT	OVS_CT_LIMIT_UNLIMITED
#define CT_LIMIT_HASH_BUCKETS	512
static DEFINE_STATIC_KEY_FALSE(ovs_ct_limit_enabled);

struct ovs_ct_limit {
	/* Elements in ovs_ct_limit_info->limits hash table */
	struct hlist_node hlist_node;
	struct rcu_head rcu;
	u16 zone;
	u32 limit;
};

struct ovs_ct_limit_info {
	u32 default_limit;
	struct hlist_head *limits;
	struct nf_conncount_data *data;
};

static const struct nla_policy ct_limit_policy[OVS_CT_LIMIT_ATTR_MAX + 1] = {
	[OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NLA_NESTED, },
};
#endif
static bool labels_nonzero(const struct ovs_key_ct_labels *labels);

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);

static u16 key_to_nfproto(const struct sw_flow_key *key)
{
	switch (ntohs(key->eth.type)) {
	case ETH_P_IP:
		return NFPROTO_IPV4;
	case ETH_P_IPV6:
		return NFPROTO_IPV6;
	default:
		return NFPROTO_UNSPEC;
	}
}
/* Map SKB connection state into the values used by flow definition. */
static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
{
	u8 ct_state = OVS_CS_F_TRACKED;

	switch (ctinfo) {
	case IP_CT_ESTABLISHED_REPLY:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_REPLY_DIR;
		break;
	default:
		break;
	}

	switch (ctinfo) {
	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		ct_state |= OVS_CS_F_ESTABLISHED;
		break;
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_RELATED;
		break;
	case IP_CT_NEW:
		ct_state |= OVS_CS_F_NEW;
		break;
	default:
		break;
	}

	return ct_state;
}
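
/* Example mapping performed by ovs_ct_get_state(): a reply packet on an
 * established connection (IP_CT_ESTABLISHED_REPLY) yields
 * OVS_CS_F_TRACKED | OVS_CS_F_REPLY_DIR | OVS_CS_F_ESTABLISHED, i.e. the
 * "+trk+rpl+est" ct_state that OpenFlow rules match on.
 */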
static u32 ovs_ct_get_mark(const struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	return ct ? ct->mark : 0;
#else
	return 0;
#endif
}

/* Guard against conntrack labels max size shrinking below 128 bits. */
#if NF_CT_LABELS_MAX_SIZE < 16
#error NF_CT_LABELS_MAX_SIZE must be at least 16 bytes
#endif
static void ovs_ct_get_labels(const struct nf_conn *ct,
			      struct ovs_key_ct_labels *labels)
{
	struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;

	if (cl)
		memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
	else
		memset(labels, 0, OVS_CT_LABELS_LEN);
}
static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key,
					const struct nf_conntrack_tuple *orig,
					u8 icmp_proto)
{
	key->ct_orig_proto = orig->dst.protonum;
	if (orig->dst.protonum == icmp_proto) {
		key->ct.orig_tp.src = htons(orig->dst.u.icmp.type);
		key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code);
	} else {
		key->ct.orig_tp.src = orig->src.u.all;
		key->ct.orig_tp.dst = orig->dst.u.all;
	}
}
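
/* For ICMP/ICMPv6 the original direction tuple has no ports; the message
 * type and code are stored in the source and destination port fields of
 * the key instead, which is why they are converted with htons() above.
 */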
static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
				const struct nf_conntrack_zone *zone,
				const struct nf_conn *ct)
{
	key->ct_state = state;
	key->ct_zone = zone->id;
	key->ct.mark = ovs_ct_get_mark(ct);
	ovs_ct_get_labels(ct, &key->ct.labels);

	if (ct) {
		const struct nf_conntrack_tuple *orig;

		/* Use the master if we have one. */
		if (ct->master)
			ct = ct->master;
		orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;

		/* IP version must match with the master connection. */
		if (key->eth.type == htons(ETH_P_IP) &&
		    nf_ct_l3num(ct) == NFPROTO_IPV4) {
			key->ipv4.ct_orig.src = orig->src.u3.ip;
			key->ipv4.ct_orig.dst = orig->dst.u3.ip;
			__ovs_ct_update_key_orig_tp(key, orig, IPPROTO_ICMP);
			return;
		} else if (key->eth.type == htons(ETH_P_IPV6) &&
			   !sw_flow_key_is_nd(key) &&
			   nf_ct_l3num(ct) == NFPROTO_IPV6) {
			key->ipv6.ct_orig.src = orig->src.u3.in6;
			key->ipv6.ct_orig.dst = orig->dst.u3.in6;
			__ovs_ct_update_key_orig_tp(key, orig, NEXTHDR_ICMP);
			return;
		}
	}
	/* Clear 'ct_orig_proto' to mark the non-existence of conntrack
	 * original direction key fields.
	 */
	key->ct_orig_proto = 0;
}
/* Update 'key' based on skb->_nfct.  If 'post_ct' is true, then OVS has
 * previously sent the packet to conntrack via the ct action.  If
 * 'keep_nat_flags' is true, the existing NAT flags are retained, else they are
 * initialized from the connection status.
 */
static void ovs_ct_update_key(const struct sk_buff *skb,
			      const struct ovs_conntrack_info *info,
			      struct sw_flow_key *key, bool post_ct,
			      bool keep_nat_flags)
{
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	u8 state = 0;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		state = ovs_ct_get_state(ctinfo);
		/* All unconfirmed entries are NEW connections. */
		if (!nf_ct_is_confirmed(ct))
			state |= OVS_CS_F_NEW;
		/* OVS persists the related flag for the duration of the
		 * connection.
		 */
		if (ct->master)
			state |= OVS_CS_F_RELATED;
		if (keep_nat_flags) {
			state |= key->ct_state & OVS_CS_F_NAT_MASK;
		} else {
			if (ct->status & IPS_SRC_NAT)
				state |= OVS_CS_F_SRC_NAT;
			if (ct->status & IPS_DST_NAT)
				state |= OVS_CS_F_DST_NAT;
		}
		zone = nf_ct_zone(ct);
	} else if (post_ct) {
		state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
		if (info)
			zone = &info->zone;
	}
	__ovs_ct_update_key(key, state, zone, ct);
}

/* This is called to initialize CT key fields possibly coming in from the local
 * stack.
 */
void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
{
	ovs_ct_update_key(skb, NULL, key, false, false);
}
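
/* ovs_ct_fill_key() passes a NULL info and post_ct == false: packets coming
 * in from the local stack may already carry a conntrack entry that OVS has
 * never processed, and the key must reflect it without assuming a prior ct
 * action ran.
 */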
int ovs_ct_put_key(const struct sw_flow_key *swkey,
		   const struct sw_flow_key *output, struct sk_buff *skb)
{
	if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels),
		    &output->ct.labels))
		return -EMSGSIZE;

	if (swkey->ct_orig_proto) {
		if (swkey->eth.type == htons(ETH_P_IP)) {
			struct ovs_key_ct_tuple_ipv4 orig;

			memset(&orig, 0, sizeof(orig));
			orig.ipv4_src = output->ipv4.ct_orig.src;
			orig.ipv4_dst = output->ipv4.ct_orig.dst;
			orig.src_port = output->ct.orig_tp.src;
			orig.dst_port = output->ct.orig_tp.dst;
			orig.ipv4_proto = output->ct_orig_proto;

			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
			struct ovs_key_ct_tuple_ipv6 orig;

			memset(&orig, 0, sizeof(orig));
			memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32,
			       sizeof(orig.ipv6_src));
			memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32,
			       sizeof(orig.ipv6_dst));
			orig.src_port = output->ct.orig_tp.src;
			orig.dst_port = output->ct.orig_tp.dst;
			orig.ipv6_proto = output->ct_orig_proto;

			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		}
	}

	return 0;
}
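
/* The original-direction tuple attributes are emitted only when
 * swkey->ct_orig_proto is non-zero, i.e. only when __ovs_ct_update_key()
 * actually found conntrack original direction data for this IP version.
 */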
static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
			   u32 ct_mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	new_mark = ct_mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
		key->ct.mark = new_mark;
	}

	return 0;
#else
	return -ENOTSUPP;
#endif
}
static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct)
{
	struct nf_conn_labels *cl;

	cl = nf_ct_labels_find(ct);
	if (!cl) {
		nf_ct_labels_ext_add(ct);
		cl = nf_ct_labels_find(ct);
	}

	return cl;
}
/* Initialize labels for a new, yet to be committed conntrack entry.  Note that
 * since the new connection is not yet confirmed, and thus no-one else has
 * access to its labels, we simply write them over.
 */
static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key,
			      const struct ovs_key_ct_labels *labels,
			      const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl, *master_cl;
	bool have_mask = labels_nonzero(mask);

	/* Inherit master's labels to the related connection? */
	master_cl = ct->master ? nf_ct_labels_find(ct->master) : NULL;

	if (!master_cl && !have_mask)
		return 0;   /* Nothing to do. */

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	/* Inherit the master's labels, if any. */
	if (master_cl)
		*cl = *master_cl;

	if (have_mask) {
		u32 *dst = (u32 *)cl->bits;
		int i;

		for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
			dst[i] = (dst[i] & ~mask->ct_labels_32[i]) |
				(labels->ct_labels_32[i]
				 & mask->ct_labels_32[i]);
	}

	/* Labels are included in the IPCTNL_MSG_CT_NEW event only if the
	 * IPCT_LABEL bit is set in the event cache.
	 */
	nf_conntrack_event_cache(IPCT_LABEL, ct);

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}
static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
			     const struct ovs_key_ct_labels *labels,
			     const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl;
	int err;

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	err = nf_connlabels_replace(ct, labels->ct_labels_32,
				    mask->ct_labels_32,
				    OVS_CT_LABELS_LEN_32);
	if (err)
		return err;

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}
/* 'skb' should already be pulled to nh_ofs. */
static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
{
	const struct nf_conntrack_helper *helper;
	const struct nf_conn_help *help;
	enum ip_conntrack_info ctinfo;
	unsigned int protoff;
	struct nf_conn *ct;
	int err;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct || ctinfo == IP_CT_RELATED_REPLY)
		return NF_ACCEPT;

	help = nfct_help(ct);
	if (!help)
		return NF_ACCEPT;

	helper = rcu_dereference(help->helper);
	if (!helper)
		return NF_ACCEPT;

	switch (proto) {
	case NFPROTO_IPV4:
		protoff = ip_hdrlen(skb);
		break;
	case NFPROTO_IPV6: {
		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
		__be16 frag_off;
		int ofs;

		ofs = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
				       &frag_off);
		if (ofs < 0 || (frag_off & htons(~0x7)) != 0) {
			pr_debug("proto header not found\n");
			return NF_ACCEPT;
		}
		protoff = ofs;
		break;
	}
	default:
		WARN_ONCE(1, "helper invoked on non-IP family!");
		return NF_DROP;
	}

	err = helper->help(skb, protoff, ct, ctinfo);
	if (err != NF_ACCEPT)
		return err;

	/* Adjust seqs after helper.  This is needed due to some helpers (e.g.,
	 * FTP with NAT) adjusting the TCP payload size when mangling IP
	 * addresses and/or port numbers in the text-based control connection.
	 */
	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
	    !nf_ct_seq_adjust(skb, ct, ctinfo, protoff))
		return NF_DROP;
	return NF_ACCEPT;
}
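
/* Note: without the nf_ct_seq_adjust() call above, a helper such as FTP
 * that rewrites payload bytes under NAT would leave TCP sequence numbers
 * inconsistent with the mangled data and the connection would stall.
 */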
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
			    u16 zone, struct sk_buff *skb)
{
	struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
	int err;

	if (key->eth.type == htons(ETH_P_IP)) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		err = ip_defrag(net, skb, user);
		if (err)
			return err;

		ovs_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err) {
			if (err != -EINPROGRESS)
				kfree_skb(skb);
			return err;
		}

		key->ip.proto = ipv6_hdr(skb)->nexthdr;
		ovs_cb.mru = IP6CB(skb)->frag_max_size;
#endif
	} else {
		kfree_skb(skb);
		return -EPFNOSUPPORT;
	}

	/* The key extracted from the fragment that completed this datagram
	 * likely didn't have an L4 header, so regenerate it.
	 */
	ovs_flow_key_update_l3l4(skb, key);

	key->ip.frag = OVS_FRAG_TYPE_NONE;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	*OVS_CB(skb) = ovs_cb;

	return 0;
}
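
/* Callers must distinguish the handle_fragments() return values: 0 means a
 * complete datagram was reassembled into 'skb', -EINPROGRESS means 'skb'
 * was stolen by the defragmentation queue, and any other error means 'skb'
 * has already been freed.
 */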
static struct nf_conntrack_expect *
ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
		   u16 proto, const struct sk_buff *skb)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple))
		return NULL;

	exp = __nf_ct_expect_find(net, zone, &tuple);
	if (exp) {
		struct nf_conntrack_tuple_hash *h;

		/* Delete existing conntrack entry, if it clashes with the
		 * expectation.  This can happen since conntrack ALGs do not
		 * check for clashes between (new) expectations and existing
		 * conntrack entries.  nf_conntrack_in() will check the
		 * expectations only if a conntrack entry can not be found,
		 * which can lead to OVS finding the expectation (here) in the
		 * init direction, but which will not be removed by the
		 * nf_conntrack_in() call, if a matching conntrack entry is
		 * found instead.  In this case all init direction packets
		 * would be reported as new related packets, while reply
		 * direction packets would be reported as un-related
		 * established packets.
		 */
		h = nf_conntrack_find_get(net, zone, &tuple);
		if (h) {
			struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

			nf_ct_delete(ct, 0, 0);
			nf_conntrack_put(&ct->ct_general);
		}
	}

	return exp;
}
/* This replicates logic from nf_conntrack_core.c that is not exported. */
static enum ip_conntrack_info
ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h)
{
	const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
		return IP_CT_ESTABLISHED_REPLY;
	/* Once we've had two way comms, always ESTABLISHED. */
	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		return IP_CT_ESTABLISHED;
	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
		return IP_CT_RELATED;
	return IP_CT_NEW;
}
/* Find an existing connection which this packet belongs to without
 * re-attributing statistics or modifying the connection state.  This allows an
 * skb->_nfct lost due to an upcall to be recovered during actions execution.
 *
 * Must be called with rcu_read_lock.
 *
 * On success, populates skb->_nfct and returns the connection.  Returns NULL
 * if there is no existing entry.
 */
static struct nf_conn *
ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
		     u8 l3num, struct sk_buff *skb, bool natted)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
			       net, &tuple)) {
		pr_debug("ovs_ct_find_existing: Can't get tuple\n");
		return NULL;
	}

	/* Must invert the tuple if skb has been transformed by NAT. */
	if (natted) {
		struct nf_conntrack_tuple inverse;

		if (!nf_ct_invert_tuple(&inverse, &tuple)) {
			pr_debug("ovs_ct_find_existing: Inversion failed!\n");
			return NULL;
		}
		tuple = inverse;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return NULL;   /* Not found. */

	ct = nf_ct_tuplehash_to_ctrack(h);

	/* Inverted packet tuple matches the reverse direction conntrack tuple,
	 * select the other tuplehash to get the right 'ctinfo' bits for this
	 * packet.
	 */
	if (natted)
		h = &ct->tuplehash[!h->tuple.dst.dir];

	nf_ct_set(skb, ct, ovs_ct_get_info(h));
	return ct;
}
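
/* When 'natted' is set the packet has already been translated, so its tuple
 * no longer matches the original-direction conntrack tuple; looking up the
 * inverted tuple finds the reply-direction tuplehash instead, and the
 * !h->tuple.dst.dir flip above recovers the direction the packet actually
 * belongs to.
 */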
static
struct nf_conn *ovs_ct_executed(struct net *net,
				const struct sw_flow_key *key,
				const struct ovs_conntrack_info *info,
				struct sk_buff *skb,
				bool *ct_executed)
{
	struct nf_conn *ct = NULL;

	/* If no ct, check if we have evidence that an existing conntrack entry
	 * might be found for this skb.  This happens when we lose a skb->_nfct
	 * due to an upcall, or if the direction is being forced.  If the
	 * connection was not confirmed, it is not cached and needs to be run
	 * through conntrack again.
	 */
	*ct_executed = (key->ct_state & OVS_CS_F_TRACKED) &&
		       !(key->ct_state & OVS_CS_F_INVALID) &&
		       (key->ct_zone == info->zone.id);

	if (*ct_executed || (!key->ct_state && info->force)) {
		ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
					  !!(key->ct_state &
					     OVS_CS_F_NAT_MASK));
	}

	return ct;
}
/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool skb_nfct_cached(struct net *net,
			    const struct sw_flow_key *key,
			    const struct ovs_conntrack_info *info,
			    struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	bool ct_executed = true;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		ct = ovs_ct_executed(net, key, info, skb, &ct_executed);

	if (ct)
		nf_ct_get(skb, &ctinfo);
	else
		return false;

	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
		return false;
	if (info->helper) {
		struct nf_conn_help *help;

		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
		if (help && rcu_access_pointer(help->helper) != info->helper)
			return false;
	}
	if (info->nf_ct_timeout) {
		struct nf_conn_timeout *timeout_ext;

		timeout_ext = nf_ct_timeout_find(ct);
		if (!timeout_ext || info->nf_ct_timeout !=
		    rcu_dereference(timeout_ext->timeout))
			return false;
	}
	/* Force conntrack entry direction to the current packet? */
	if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		/* Delete the conntrack entry if confirmed, else just release
		 * the reference.
		 */
		if (nf_ct_is_confirmed(ct))
			nf_ct_delete(ct, 0, 0);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, 0);
		return false;
	}

	return ct_executed;
}
#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			      enum ip_conntrack_info ctinfo,
			      const struct nf_nat_range2 *range,
			      enum nf_nat_manip_type maniptype)
{
	int hooknum, nh_off, err = NF_ACCEPT;

	nh_off = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_off);

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (IS_ENABLED(CONFIG_NF_NAT) &&
		    skb->protocol == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto push;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto push;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto push;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto push;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
push:
	skb_push(skb, nh_off);
	skb_postpush_rcsum(skb, skb->data, nh_off);

	return err;
}
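
/* The hook number only selects the NAT manipulation in HOOK2MANIP() terms:
 * NF_INET_LOCAL_IN implies source NAT and NF_INET_LOCAL_OUT implies
 * destination NAT.  OVS does not traverse the real netfilter hooks here;
 * it drives the NAT engine directly from the ct action.
 */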
static void ovs_nat_update_key(struct sw_flow_key *key,
			       const struct sk_buff *skb,
			       enum nf_nat_manip_type maniptype)
{
	if (maniptype == NF_NAT_MANIP_SRC) {
		__be16 src;

		key->ct_state |= OVS_CS_F_SRC_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.src = ip_hdr(skb)->saddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
			       sizeof(key->ipv6.addr.src));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			src = udp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_TCP)
			src = tcp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_SCTP)
			src = sctp_hdr(skb)->source;
		else
			return;

		key->tp.src = src;
	} else {
		__be16 dst;

		key->ct_state |= OVS_CS_F_DST_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.dst = ip_hdr(skb)->daddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
			       sizeof(key->ipv6.addr.dst));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			dst = udp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_TCP)
			dst = tcp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_SCTP)
			dst = sctp_hdr(skb)->dest;
		else
			return;

		key->tp.dst = dst;
	}
}
/* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	enum nf_nat_manip_type maniptype;
	int err;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_ACCEPT;   /* Can't NAT. */

	/* Determine NAT type.
	 * Check if the NAT type can be deduced from the tracked connection.
	 * Make sure new expected connections (IP_CT_RELATED) are NATted only
	 * when committing.
	 */
	if (info->nat & OVS_CT_NAT && ctinfo != IP_CT_NEW &&
	    ct->status & IPS_NAT_MASK &&
	    (ctinfo != IP_CT_RELATED || info->commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (info->nat & OVS_CT_SRC_NAT) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (info->nat & OVS_CT_DST_NAT) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT; /* Connection is not NATed. */
	}
	err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype);

	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
		if (ct->status & IPS_SRC_NAT) {
			if (maniptype == NF_NAT_MANIP_SRC)
				maniptype = NF_NAT_MANIP_DST;
			else
				maniptype = NF_NAT_MANIP_SRC;

			err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range,
						 maniptype);
		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL,
						 NF_NAT_MANIP_SRC);
		}
	}

	/* Mark NAT done if successful and update the flow key. */
	if (err == NF_ACCEPT)
		ovs_nat_update_key(key, skb, maniptype);

	return err;
}
#else /* !CONFIG_NF_NAT */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	return NF_ACCEPT;
}
#endif
/* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
 * not done already.  Update key with new CT state after passing the packet
 * through conntrack.
 * Note that if the packet is deemed invalid by conntrack, skb->_nfct will be
 * set to NULL and 0 will be returned.
 */
static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			   const struct ovs_conntrack_info *info,
			   struct sk_buff *skb)
{
	/* If we are recirculating packets to match on conntrack fields and
	 * committing with a separate conntrack action,  then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	bool cached = skb_nfct_cached(net, key, info, skb);
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	if (!cached) {
		struct nf_hook_state state = {
			.hook = NF_INET_PRE_ROUTING,
			.pf = info->family,
			.net = net,
		};
		struct nf_conn *tmpl = info->ct;
		int err;

		/* Associate skb with specified zone. */
		if (tmpl) {
			nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			return -ENOENT;

		/* Clear CT state NAT flags to mark that we have not yet done
		 * NAT after the nf_conntrack_in() call.  We can actually clear
		 * the whole state, as it will be re-initialized below.
		 */
		key->ct_state = 0;

		/* Update the key, but keep the NAT flags. */
		ovs_ct_update_key(skb, info, key, true, true);
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		bool add_helper = false;

		/* Packets starting a new connection must be NATted before the
		 * helper, so that the helper knows about the NAT.  We enforce
		 * this by delaying both NAT and helper calls for unconfirmed
		 * connections until the committing CT action.  For later
		 * packets NAT and Helper may be called in either order.
		 *
		 * NAT will be done only if the CT action has NAT, and only
		 * once per packet (per zone), as guarded by the NAT bits in
		 * the key->ct_state.
		 */
		if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
		    (nf_ct_is_confirmed(ct) || info->commit) &&
		    ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
			return -EINVAL;
		}

		/* Userspace may decide to perform a ct lookup without a helper
		 * specified followed by a (recirculate and) commit with one,
		 * or attach a helper in a later commit.  Therefore, for
		 * connections which we will commit, we may need to attach
		 * the helper here.
		 */
		if (info->commit && info->helper && !nfct_help(ct)) {
			int err = __nf_ct_try_assign_helper(ct, info->ct,
							    GFP_ATOMIC);
			if (err)
				return err;
			add_helper = true;

			/* helper installed, add seqadj if NAT is required */
			if (info->nat && !nfct_seqadj(ct)) {
				if (!nfct_seqadj_ext_add(ct))
					return -EINVAL;
			}
		}

		/* Call the helper only if:
		 * - nf_conntrack_in() was executed above ("!cached") or a
		 *   helper was just attached ("add_helper") for a confirmed
		 *   connection, or
		 * - When committing an unconfirmed connection.
		 */
		if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
					      info->commit) &&
		    ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
			return -EINVAL;
		}

		if (nf_ct_protonum(ct) == IPPROTO_TCP &&
		    nf_ct_is_confirmed(ct) && nf_conntrack_tcp_established(ct)) {
			/* Be liberal for tcp packets so that out-of-window
			 * packets are not marked invalid.
			 */
			nf_ct_set_tcp_be_liberal(ct);
		}
	}

	return 0;
}
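
/* Within __ovs_ct_lookup(), a packet already run through conntrack in this
 * zone ("cached") skips nf_conntrack_in() entirely; NAT and the helper may
 * still run, guarded by the key->ct_state NAT bits and the helper/commit
 * checks.
 */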
/* Lookup connection and read fields into key. */
static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	struct nf_conntrack_expect *exp;

	/* If we pass an expected packet through nf_conntrack_in() the
	 * expectation is typically removed, but the packet could still be
	 * lost in upcall processing.  To prevent this from happening we
	 * perform an explicit expectation lookup.  Expected connections are
	 * always new, and will be passed through conntrack only when they are
	 * committed, as it is OK to remove the expectation at that time.
	 */
	exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
	if (exp) {
		u8 state;

		/* NOTE: New connections are NATted and Helped only when
		 * committed, so we are not calling into NAT here.
		 */
		state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
		__ovs_ct_update_key(key, state, &info->zone, exp->master);
	} else {
		struct nf_conn *ct;
		int err;

		err = __ovs_ct_lookup(net, key, info, skb);
		if (err)
			return err;

		ct = (struct nf_conn *)skb_nfct(skb);
		if (ct)
			nf_ct_deliver_cached_events(ct);
	}

	return 0;
}
static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
{
	size_t i;

	for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
		if (labels->ct_labels_32[i])
			return true;

	return false;
}
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static struct hlist_head *ct_limit_hash_bucket(
	const struct ovs_ct_limit_info *info, u16 zone)
{
	return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
}

/* Call with ovs_mutex */
static void ct_limit_set(const struct ovs_ct_limit_info *info,
			 struct ovs_ct_limit *new_ct_limit)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, new_ct_limit->zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == new_ct_limit->zone) {
			hlist_replace_rcu(&ct_limit->hlist_node,
					  &new_ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}

	hlist_add_head_rcu(&new_ct_limit->hlist_node, head);
}
/* Call with ovs_mutex */
static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;
	struct hlist_node *n;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) {
		if (ct_limit->zone == zone) {
			hlist_del_rcu(&ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}
}
/* Call with RCU read lock */
static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == zone)
			return ct_limit->limit;
	}

	return info->default_limit;
}
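
/* ct_limit_get() is safe under the RCU read lock alone; writers in
 * ct_limit_set()/ct_limit_del() publish updates with the _rcu list helpers
 * while holding ovs_mutex, and zones without an entry fall back to
 * default_limit.
 */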
static int ovs_ct_check_limit(struct net *net,
			      const struct ovs_conntrack_info *info,
			      const struct nf_conntrack_tuple *tuple)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	const struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	u32 per_zone_limit, connections;
	u32 conncount_key;

	conncount_key = info->zone.id;

	per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
	if (per_zone_limit == OVS_CT_LIMIT_UNLIMITED)
		return 0;

	connections = nf_conncount_count(net, ct_limit_info->data,
					 &conncount_key, tuple, &info->zone);
	if (connections > per_zone_limit)
		return -ENOMEM;

	return 0;
}
#endif
/* Lookup connection and confirm if unconfirmed. */
static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err;

	err = __ovs_ct_lookup(net, key, info, skb);
	if (err)
		return err;

	/* The connection could be invalid, in which case this is a no-op.*/
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return 0;

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	if (static_branch_unlikely(&ovs_ct_limit_enabled)) {
		if (!nf_ct_is_confirmed(ct)) {
			err = ovs_ct_check_limit(net, info,
				&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
			if (err) {
				net_warn_ratelimited("openvswitch: zone: %u "
					"exceeds conntrack limit\n",
					info->zone.id);
				return err;
			}
		}
	}
#endif

	/* Set the conntrack event mask if given.  NEW and DELETE events have
	 * their own groups, but the NFNLGRP_CONNTRACK_UPDATE group listener
	 * typically would receive many kinds of updates.  Setting the event
	 * mask allows those events to be filtered.  The set event mask will
	 * remain in effect for the lifetime of the connection unless changed
	 * by a further CT action with both the commit flag and the eventmask
	 * option.
	 */
	if (info->have_eventmask) {
		struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct);

		if (cache)
			cache->ctmask = info->eventmask;
	}

	/* Apply changes before confirming the connection so that the initial
	 * conntrack NEW netlink event carries the values given in the CT
	 * action.
	 */
	if (info->mark.mask) {
		err = ovs_ct_set_mark(ct, key, info->mark.value,
				      info->mark.mask);
		if (err)
			return err;
	}
	if (!nf_ct_is_confirmed(ct)) {
		err = ovs_ct_init_labels(ct, key, &info->labels.value,
					 &info->labels.mask);
		if (err)
			return err;
	} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
		   labels_nonzero(&info->labels.mask)) {
		err = ovs_ct_set_labels(ct, key, &info->labels.value,
					&info->labels.mask);
		if (err)
			return err;
	}
	/* This will take care of sending queued events even if the connection
	 * is already confirmed.
	 */
	if (nf_conntrack_confirm(skb) != NF_ACCEPT)
		return -EINVAL;

	return 0;
}
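
/* Mark, labels and the eventmask are applied in ovs_ct_commit() before
 * nf_conntrack_confirm() on purpose: the IPCTNL_MSG_CT_NEW netlink event is
 * generated at confirm time, so any metadata set later would be missing
 * from the NEW event seen by conntrack listeners.
 */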
/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding.  This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum).  The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int ovs_skb_network_trim(struct sk_buff *skb)
{
	unsigned int len;
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case htons(ETH_P_IPV6):
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);
	if (err)
		kfree_skb(skb);

	return err;
}
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
int ovs_ct_execute(struct net *net, struct sk_buff *skb,
		   struct sw_flow_key *key,
		   const struct ovs_conntrack_info *info)
{
	int nh_ofs;
	int err;

	/* The conntrack module expects to be working at L3. */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);

	err = ovs_skb_network_trim(skb);
	if (err)
		return err;

	if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
		err = handle_fragments(net, key, info->zone.id, skb);
		if (err)
			return err;
	}

	if (info->commit)
		err = ovs_ct_commit(net, key, info, skb);
	else
		err = ovs_ct_lookup(net, key, info, skb);

	skb_push(skb, nh_ofs);
	skb_postpush_rcsum(skb, skb->data, nh_ofs);
	if (err)
		kfree_skb(skb);
	return err;
}
int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
{
	if (skb_nfct(skb)) {
		nf_conntrack_put(skb_nfct(skb));
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		ovs_ct_fill_key(skb, key);
	}

	return 0;
}
static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
			     const struct sw_flow_key *key, bool log)
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help;
	int ret = 0;

	helper = nf_conntrack_helper_try_module_get(name, info->family,
						    key->ip.proto);
	if (!helper) {
		OVS_NLERR(log, "Unknown helper \"%s\"", name);
		return -EINVAL;
	}

	help = nf_ct_helper_ext_add(info->ct, GFP_KERNEL);
	if (!help) {
		nf_conntrack_helper_put(helper);
		return -ENOMEM;
	}

#if IS_ENABLED(CONFIG_NF_NAT)
	if (info->nat) {
		ret = nf_nat_helper_try_module_get(name, info->family,
						   key->ip.proto);
		if (ret) {
			nf_conntrack_helper_put(helper);
			OVS_NLERR(log, "Failed to load \"%s\" NAT helper, error: %d",
				  name, ret);
			return ret;
		}
	}
#endif
	rcu_assign_pointer(help->helper, helper);
	info->helper = helper;
	return ret;
}
#if IS_ENABLED(CONFIG_NF_NAT)
static int parse_nat(const struct nlattr *attr,
		     struct ovs_conntrack_info *info, bool log)
{
	struct nlattr *a;
	int rem;
	bool have_ip_max = false;
	bool have_proto_max = false;
	bool ip_vers = (info->family == NFPROTO_IPV6);

	nla_for_each_nested(a, attr, rem) {
		static const int ovs_nat_attr_lens[OVS_NAT_ATTR_MAX + 1][2] = {
			[OVS_NAT_ATTR_SRC] = {0, 0},
			[OVS_NAT_ATTR_DST] = {0, 0},
			[OVS_NAT_ATTR_IP_MIN] = {sizeof(struct in_addr),
						 sizeof(struct in6_addr)},
			[OVS_NAT_ATTR_IP_MAX] = {sizeof(struct in_addr),
						 sizeof(struct in6_addr)},
			[OVS_NAT_ATTR_PROTO_MIN] = {sizeof(u16), sizeof(u16)},
			[OVS_NAT_ATTR_PROTO_MAX] = {sizeof(u16), sizeof(u16)},
			[OVS_NAT_ATTR_PERSISTENT] = {0, 0},
			[OVS_NAT_ATTR_PROTO_HASH] = {0, 0},
			[OVS_NAT_ATTR_PROTO_RANDOM] = {0, 0},
		};
		int type = nla_type(a);

		if (type > OVS_NAT_ATTR_MAX) {
			OVS_NLERR(log, "Unknown NAT attribute (type=%d, max=%d)",
				  type, OVS_NAT_ATTR_MAX);
			return -EINVAL;
		}

		if (nla_len(a) != ovs_nat_attr_lens[type][ip_vers]) {
			OVS_NLERR(log, "NAT attribute type %d has unexpected length (%d != %d)",
				  type, nla_len(a),
				  ovs_nat_attr_lens[type][ip_vers]);
			return -EINVAL;
		}

		switch (type) {
		case OVS_NAT_ATTR_SRC:
		case OVS_NAT_ATTR_DST:
			if (info->nat) {
				OVS_NLERR(log, "Only one type of NAT may be specified");
				return -ERANGE;
			}
			info->nat |= OVS_CT_NAT;
			info->nat |= ((type == OVS_NAT_ATTR_SRC)
					? OVS_CT_SRC_NAT : OVS_CT_DST_NAT);
			break;

		case OVS_NAT_ATTR_IP_MIN:
			nla_memcpy(&info->range.min_addr, a,
				   sizeof(info->range.min_addr));
			info->range.flags |= NF_NAT_RANGE_MAP_IPS;
			break;

		case OVS_NAT_ATTR_IP_MAX:
			have_ip_max = true;
			nla_memcpy(&info->range.max_addr, a,
				   sizeof(info->range.max_addr));
			info->range.flags |= NF_NAT_RANGE_MAP_IPS;
			break;

		case OVS_NAT_ATTR_PROTO_MIN:
			info->range.min_proto.all = htons(nla_get_u16(a));
			info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
			break;

		case OVS_NAT_ATTR_PROTO_MAX:
			have_proto_max = true;
			info->range.max_proto.all = htons(nla_get_u16(a));
			info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
			break;

		case OVS_NAT_ATTR_PERSISTENT:
			info->range.flags |= NF_NAT_RANGE_PERSISTENT;
			break;

		case OVS_NAT_ATTR_PROTO_HASH:
			info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM;
			break;

		case OVS_NAT_ATTR_PROTO_RANDOM:
			info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM_FULLY;
			break;

		default:
			OVS_NLERR(log, "Unknown nat attribute (%d)", type);
			return -EINVAL;
		}
	}

	if (rem > 0) {
		OVS_NLERR(log, "NAT attribute has %d unknown bytes", rem);
		return -EINVAL;
	}
	if (!info->nat) {
		/* Do not allow flags if no type is given. */
		if (info->range.flags) {
			OVS_NLERR(log,
				  "NAT flags may be given only when NAT range (SRC or DST) is also specified."
				  );
			return -EINVAL;
		}
		info->nat = OVS_CT_NAT;   /* NAT existing connections. */
	} else if (!info->commit) {
		OVS_NLERR(log,
			  "NAT attributes may be specified only when CT COMMIT flag is also specified."
			  );
		return -EINVAL;
	}
	/* Allow missing IP_MAX. */
	if (info->range.flags & NF_NAT_RANGE_MAP_IPS && !have_ip_max) {
		memcpy(&info->range.max_addr, &info->range.min_addr,
		       sizeof(info->range.max_addr));
	}
	/* Allow missing PROTO_MAX. */
	if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
	    !have_proto_max)
		info->range.max_proto.all = info->range.min_proto.all;

	return 0;
}
#endif
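
/* After parse_nat(), info->nat holds OVS_CT_NAT alone for "NAT existing
 * connections only", or OVS_CT_NAT combined with OVS_CT_SRC_NAT or
 * OVS_CT_DST_NAT when a new mapping was requested; info->range then carries
 * the optional address and port range.
 */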
static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
	[OVS_CT_ATTR_COMMIT]	= { .minlen = 0, .maxlen = 0 },
	[OVS_CT_ATTR_FORCE_COMMIT]	= { .minlen = 0, .maxlen = 0 },
	[OVS_CT_ATTR_ZONE]	= { .minlen = sizeof(u16),
				    .maxlen = sizeof(u16) },
	[OVS_CT_ATTR_MARK]	= { .minlen = sizeof(struct md_mark),
				    .maxlen = sizeof(struct md_mark) },
	[OVS_CT_ATTR_LABELS]	= { .minlen = sizeof(struct md_labels),
				    .maxlen = sizeof(struct md_labels) },
	[OVS_CT_ATTR_HELPER]	= { .minlen = 1,
				    .maxlen = NF_CT_HELPER_NAME_LEN },
#if IS_ENABLED(CONFIG_NF_NAT)
	/* NAT length is checked when parsing the nested attributes. */
	[OVS_CT_ATTR_NAT]	= { .minlen = 0, .maxlen = INT_MAX },
#endif
	[OVS_CT_ATTR_EVENTMASK]	= { .minlen = sizeof(u32),
				    .maxlen = sizeof(u32) },
	[OVS_CT_ATTR_TIMEOUT] = { .minlen = 1,
				  .maxlen = CTNL_TIMEOUT_NAME_MAX },
};
static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
		    const char **helper, bool log)
{
	struct nlattr *a;
	int rem;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int maxlen;
		int minlen;

		if (type > OVS_CT_ATTR_MAX) {
			OVS_NLERR(log,
				  "Unknown conntrack attr (type=%d, max=%d)",
				  type, OVS_CT_ATTR_MAX);
			return -EINVAL;
		}

		maxlen = ovs_ct_attr_lens[type].maxlen;
		minlen = ovs_ct_attr_lens[type].minlen;
		if (nla_len(a) < minlen || nla_len(a) > maxlen) {
			OVS_NLERR(log,
				  "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
				  type, nla_len(a), maxlen);
			return -EINVAL;
		}

		switch (type) {
		case OVS_CT_ATTR_FORCE_COMMIT:
			info->force = true;
			fallthrough;
		case OVS_CT_ATTR_COMMIT:
			info->commit = true;
			break;
#ifdef CONFIG_NF_CONNTRACK_ZONES
		case OVS_CT_ATTR_ZONE:
			info->zone.id = nla_get_u16(a);
			break;
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
		case OVS_CT_ATTR_MARK: {
			struct md_mark *mark = nla_data(a);

			if (!mark->mask) {
				OVS_NLERR(log, "ct_mark mask cannot be 0");
				return -EINVAL;
			}
			info->mark = *mark;
			break;
		}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
		case OVS_CT_ATTR_LABELS: {
			struct md_labels *labels = nla_data(a);

			if (!labels_nonzero(&labels->mask)) {
				OVS_NLERR(log, "ct_labels mask cannot be 0");
				return -EINVAL;
			}
			info->labels = *labels;
			break;
		}
#endif
		case OVS_CT_ATTR_HELPER:
			*helper = nla_data(a);
			if (!memchr(*helper, '\0', nla_len(a))) {
				OVS_NLERR(log, "Invalid conntrack helper");
				return -EINVAL;
			}
			break;
#if IS_ENABLED(CONFIG_NF_NAT)
		case OVS_CT_ATTR_NAT: {
			int err = parse_nat(a, info, log);

			if (err)
				return err;
			break;
		}
#endif
		case OVS_CT_ATTR_EVENTMASK:
			info->have_eventmask = true;
			info->eventmask = nla_get_u32(a);
			break;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
		case OVS_CT_ATTR_TIMEOUT:
			memcpy(info->timeout, nla_data(a), nla_len(a));
			if (!memchr(info->timeout, '\0', nla_len(a))) {
				OVS_NLERR(log, "Invalid conntrack timeout");
				return -EINVAL;
			}
			break;
#endif

		default:
			OVS_NLERR(log, "Unknown conntrack attr (%d)",
				  type);
			return -EINVAL;
		}
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if (!info->commit && info->mark.mask) {
		OVS_NLERR(log,
			  "Setting conntrack mark requires 'commit' flag.");
		return -EINVAL;
	}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
	if (!info->commit && labels_nonzero(&info->labels.mask)) {
		OVS_NLERR(log,
			  "Setting conntrack labels requires 'commit' flag.");
		return -EINVAL;
	}
#endif
	if (rem > 0) {
		OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
		return -EINVAL;
	}

	return 0;
}
bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
{
	if (attr == OVS_KEY_ATTR_CT_STATE)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    attr == OVS_KEY_ATTR_CT_ZONE)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    attr == OVS_KEY_ATTR_CT_MARK)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    attr == OVS_KEY_ATTR_CT_LABELS) {
		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

		return ovs_net->xt_label;
	}

	return false;
}
int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
		       const struct sw_flow_key *key,
		       struct sw_flow_actions **sfa,  bool log)
{
	struct ovs_conntrack_info ct_info;
	const char *helper = NULL;
	u16 family;
	int err;

	family = key_to_nfproto(key);
	if (family == NFPROTO_UNSPEC) {
		OVS_NLERR(log, "ct family unspecified");
		return -EINVAL;
	}

	memset(&ct_info, 0, sizeof(ct_info));
	ct_info.family = family;

	nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
			NF_CT_DEFAULT_ZONE_DIR, 0);

	err = parse_ct(attr, &ct_info, &helper, log);
	if (err)
		return err;

	/* Set up template for tracking connections in specific zones. */
	ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
	if (!ct_info.ct) {
		OVS_NLERR(log, "Failed to allocate conntrack template");
		return -ENOMEM;
	}

	if (ct_info.timeout[0]) {
		if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
				      ct_info.timeout))
			pr_info_ratelimited("Failed to associate timeout "
					    "policy `%s'\n", ct_info.timeout);
		else
			ct_info.nf_ct_timeout = rcu_dereference(
				nf_ct_timeout_find(ct_info.ct)->timeout);
	}

	if (helper) {
		err = ovs_ct_add_helper(&ct_info, helper, key, log);
		if (err)
			goto err_free_ct;
	}

	err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
				 sizeof(ct_info), log);
	if (err)
		goto err_free_ct;

	__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
	nf_conntrack_get(&ct_info.ct->ct_general);
	return 0;
err_free_ct:
	__ovs_ct_free_action(&ct_info);
	return err;
}
#if IS_ENABLED(CONFIG_NF_NAT)
static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
			       struct sk_buff *skb)
{
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT);
	if (!start)
		return false;

	if (info->nat & OVS_CT_SRC_NAT) {
		if (nla_put_flag(skb, OVS_NAT_ATTR_SRC))
			return false;
	} else if (info->nat & OVS_CT_DST_NAT) {
		if (nla_put_flag(skb, OVS_NAT_ATTR_DST))
			return false;
	} else {
		goto out;
	}

	if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
		if (IS_ENABLED(CONFIG_NF_NAT) &&
		    info->family == NFPROTO_IPV4) {
			if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
					    info->range.min_addr.ip) ||
			    (info->range.max_addr.ip
			     != info->range.min_addr.ip &&
			     (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
					      info->range.max_addr.ip))))
				return false;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   info->family == NFPROTO_IPV6) {
			if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
					     &info->range.min_addr.in6) ||
			    (memcmp(&info->range.max_addr.in6,
				    &info->range.min_addr.in6,
				    sizeof(info->range.max_addr.in6)) &&
			     (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
					       &info->range.max_addr.in6))))
				return false;
		} else {
			return false;
		}
	}
	if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
	    (nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MIN,
			 ntohs(info->range.min_proto.all)) ||
	     (info->range.max_proto.all != info->range.min_proto.all &&
	      nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MAX,
			  ntohs(info->range.max_proto.all)))))
		return false;

	if (info->range.flags & NF_NAT_RANGE_PERSISTENT &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PERSISTENT))
		return false;
	if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_HASH))
		return false;
	if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_RANDOM))
		return false;
out:
	nla_nest_end(skb, start);

	return true;
}
#endif
int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
			  struct sk_buff *skb)
{
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CT);
	if (!start)
		return -EMSGSIZE;

	if (ct_info->commit && nla_put_flag(skb, ct_info->force
					    ? OVS_CT_ATTR_FORCE_COMMIT
					    : OVS_CT_ATTR_COMMIT))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
	    nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
		    &ct_info->mark))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    labels_nonzero(&ct_info->labels.mask) &&
	    nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
		    &ct_info->labels))
		return -EMSGSIZE;
	if (ct_info->helper) {
		if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
				   ct_info->helper->name))
			return -EMSGSIZE;
	}
	if (ct_info->have_eventmask &&
	    nla_put_u32(skb, OVS_CT_ATTR_EVENTMASK, ct_info->eventmask))
		return -EMSGSIZE;
	if (ct_info->timeout[0]) {
		if (nla_put_string(skb, OVS_CT_ATTR_TIMEOUT, ct_info->timeout))
			return -EMSGSIZE;
	}

#if IS_ENABLED(CONFIG_NF_NAT)
	if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb))
		return -EMSGSIZE;
#endif
	nla_nest_end(skb, start);

	return 0;
}
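
/* ovs_ct_action_to_attr() is the inverse of parse_ct()/parse_nat(): it
 * re-serializes the kernel's ovs_conntrack_info so that userspace can read
 * back exactly the ct action it installed.
 */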
void ovs_ct_free_action(const struct nlattr *a)
{
	struct ovs_conntrack_info *ct_info = nla_data(a);

	__ovs_ct_free_action(ct_info);
}

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
{
	if (ct_info->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
		if (ct_info->nat)
			nf_nat_helper_put(ct_info->helper);
#endif
		nf_conntrack_helper_put(ct_info->helper);
	}
	if (ct_info->ct) {
		if (ct_info->timeout[0])
			nf_ct_destroy_timeout(ct_info->ct);
		nf_ct_tmpl_free(ct_info->ct);
	}
}
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static int ovs_ct_limit_init(struct net *net, struct ovs_net *ovs_net)
{
	int i, err;

	ovs_net->ct_limit_info = kmalloc(sizeof(*ovs_net->ct_limit_info),
					 GFP_KERNEL);
	if (!ovs_net->ct_limit_info)
		return -ENOMEM;

	ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT;
	ovs_net->ct_limit_info->limits =
		kmalloc_array(CT_LIMIT_HASH_BUCKETS, sizeof(struct hlist_head),
			      GFP_KERNEL);
	if (!ovs_net->ct_limit_info->limits) {
		kfree(ovs_net->ct_limit_info);
		return -ENOMEM;
	}

	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&ovs_net->ct_limit_info->limits[i]);

	ovs_net->ct_limit_info->data =
		nf_conncount_init(net, NFPROTO_INET, sizeof(u32));

	if (IS_ERR(ovs_net->ct_limit_info->data)) {
		err = PTR_ERR(ovs_net->ct_limit_info->data);
		kfree(ovs_net->ct_limit_info->limits);
		kfree(ovs_net->ct_limit_info);
		pr_err("openvswitch: failed to init nf_conncount %d\n", err);
		return err;
	}
	return 0;
}
static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
{
	const struct ovs_ct_limit_info *info = ovs_net->ct_limit_info;
	int i;

	nf_conncount_destroy(net, NFPROTO_INET, info->data);
	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
		struct hlist_head *head = &info->limits[i];
		struct ovs_ct_limit *ct_limit;

		hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
					 lockdep_ovsl_is_held())
			kfree_rcu(ct_limit, rcu);
	}
	kfree(info->limits);
	kfree(info);
}
static struct sk_buff *
ovs_ct_limit_cmd_reply_start(struct genl_info *info, u8 cmd,
			     struct ovs_header **ovs_reply_header)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *skb;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	*ovs_reply_header = genlmsg_put(skb, info->snd_portid,
					info->snd_seq,
					&dp_ct_limit_genl_family, 0, cmd);

	if (!*ovs_reply_header) {
		nlmsg_free(skb);
		return ERR_PTR(-EMSGSIZE);
	}
	(*ovs_reply_header)->dp_ifindex = ovs_header->dp_ifindex;

	return skb;
}
static bool check_zone_id(int zone_id, u16 *pzone)
{
	if (zone_id >= 0 && zone_id <= 65535) {
		*pzone = (u16)zone_id;
		return true;
	}
	return false;
}
static int ovs_ct_limit_set_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = zone_limit->limit;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			struct ovs_ct_limit *ct_limit;

			ct_limit = kmalloc(sizeof(*ct_limit), GFP_KERNEL);
			if (!ct_limit)
				return -ENOMEM;

			ct_limit->zone = zone;
			ct_limit->limit = zone_limit->limit;

			ovs_lock();
			ct_limit_set(info, ct_limit);
			ovs_unlock();
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "set zone limit has %d unknown bytes", rem);

	return 0;
}
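
/* Note: OVS_ZONE_LIMIT_DEFAULT_ZONE is a sentinel zone id; setting it
 * updates default_limit for every zone without an explicit entry rather
 * than creating a hash table entry of its own.
 */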
static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = OVS_CT_LIMIT_DEFAULT;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			ovs_lock();
			ct_limit_del(info, zone);
			ovs_unlock();
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "del zone limit has %d unknown bytes", rem);

	return 0;
}
static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
					  struct sk_buff *reply)
{
	struct ovs_zone_limit zone_limit;

	zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
	zone_limit.limit = info->default_limit;

	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}
static int __ovs_ct_limit_get_zone_limit(struct net *net,
					 struct nf_conncount_data *data,
					 u16 zone_id, u32 limit,
					 struct sk_buff *reply)
{
	struct nf_conntrack_zone ct_zone;
	struct ovs_zone_limit zone_limit;
	u32 conncount_key = zone_id;

	zone_limit.zone_id = zone_id;
	zone_limit.limit = limit;
	nf_ct_zone_init(&ct_zone, zone_id, NF_CT_DEFAULT_ZONE_DIR, 0);

	zone_limit.count = nf_conncount_count(net, data, &conncount_key, NULL,
					      &ct_zone);
	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}
static int ovs_ct_limit_get_zone_limit(struct net *net,
				       struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info,
				       struct sk_buff *reply)
{
	struct ovs_zone_limit *zone_limit;
	int rem, err;
	u32 limit;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			err = ovs_ct_limit_get_default_limit(info, reply);
			if (err)
				return err;
		} else if (unlikely(!check_zone_id(zone_limit->zone_id,
						   &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			rcu_read_lock();
			limit = ct_limit_get(info, zone);
			rcu_read_unlock();

			err = __ovs_ct_limit_get_zone_limit(
				net, info->data, zone, limit, reply);
			if (err)
				return err;
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "get zone limit has %d unknown bytes", rem);

	return 0;
}
static int ovs_ct_limit_get_all_zone_limit(struct net *net,
					   struct ovs_ct_limit_info *info,
					   struct sk_buff *reply)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;
	int i, err = 0;

	err = ovs_ct_limit_get_default_limit(info, reply);
	if (err)
		return err;

	rcu_read_lock();
	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
		head = &info->limits[i];
		hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
			err = __ovs_ct_limit_get_zone_limit(net, info->data,
				ct_limit->zone, ct_limit->limit, reply);
			if (err)
				goto exit_err;
		}
	}

exit_err:
	rcu_read_unlock();
	return err;
}
static int ovs_ct_limit_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_SET,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = -EINVAL;
		goto exit_err;
	}

	err = ovs_ct_limit_set_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
					  ct_limit_info);
	if (err)
		goto exit_err;

	static_branch_enable(&ovs_ct_limit_enabled);

	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}
static int ovs_ct_limit_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_DEL,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = -EINVAL;
		goto exit_err;
	}

	err = ovs_ct_limit_del_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
					  ct_limit_info);
	if (err)
		goto exit_err;

	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}
static int ovs_ct_limit_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct nlattr *nla_reply;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct net *net = sock_net(skb->sk);
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_GET,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	nla_reply = nla_nest_start_noflag(reply, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
	if (!nla_reply) {
		err = -EMSGSIZE;
		goto exit_err;
	}

	if (a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = ovs_ct_limit_get_zone_limit(
			net, a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT], ct_limit_info,
			reply);
		if (err)
			goto exit_err;
	} else {
		err = ovs_ct_limit_get_all_zone_limit(net, ct_limit_info,
						      reply);
		if (err)
			goto exit_err;
	}

	nla_nest_end(reply, nla_reply);
	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}
static const struct genl_small_ops ct_limit_genl_ops[] = {
	{ .cmd = OVS_CT_LIMIT_CMD_SET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
					   * privilege.
					   */
		.doit = ovs_ct_limit_cmd_set,
	},
	{ .cmd = OVS_CT_LIMIT_CMD_DEL,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
					   * privilege.
					   */
		.doit = ovs_ct_limit_cmd_del,
	},
	{ .cmd = OVS_CT_LIMIT_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = 0,		  /* OK for unprivileged users. */
		.doit = ovs_ct_limit_cmd_get,
	},
};

static const struct genl_multicast_group ovs_ct_limit_multicast_group = {
	.name = OVS_CT_LIMIT_MCGROUP,
};

struct genl_family dp_ct_limit_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_CT_LIMIT_FAMILY,
	.version = OVS_CT_LIMIT_VERSION,
	.maxattr = OVS_CT_LIMIT_ATTR_MAX,
	.policy = ct_limit_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = ct_limit_genl_ops,
	.n_small_ops = ARRAY_SIZE(ct_limit_genl_ops),
	.mcgrps = &ovs_ct_limit_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};
#endif
int ovs_ct_init(struct net *net)
{
	unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		ovs_net->xt_label = false;
		OVS_NLERR(true, "Failed to set connlabel length");
	} else {
		ovs_net->xt_label = true;
	}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	return ovs_ct_limit_init(net, ovs_net);
#else
	return 0;
#endif
}
void ovs_ct_exit(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	ovs_ct_limit_exit(net, ovs_net);
#endif

	if (ovs_net->xt_label)
		nf_connlabels_put(net);
}