// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 Nicira, Inc.
 */

#include <linux/module.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/static_key.h>
#include <linux/string_helpers.h>
#include <net/genetlink.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/ipv6_frag.h>

#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_nat.h>
#endif

#include <net/netfilter/nf_conntrack_act_ct.h>

#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
#include "flow_netlink.h"

struct ovs_ct_len_tbl {
	int maxlen;
	int minlen;
};

/* Metadata mark for masked write to conntrack mark */
struct md_mark {
	u32 value;
	u32 mask;
};

/* Metadata label for masked write to conntrack label. */
struct md_labels {
	struct ovs_key_ct_labels value;
	struct ovs_key_ct_labels mask;
};

enum ovs_ct_nat {
	OVS_CT_NAT = 1 << 0,     /* NAT for committed connections only. */
	OVS_CT_SRC_NAT = 1 << 1, /* Source NAT for NEW connections. */
	OVS_CT_DST_NAT = 1 << 2, /* Destination NAT for NEW connections. */
};

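/* A ct action that requests NAT for new connections carries two bits, e.g.
 * info->nat == (OVS_CT_NAT | OVS_CT_SRC_NAT) for source NAT; OVS_CT_NAT
 * alone means "translate packets of already-NATted connections only".
 */
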
/* Conntrack action context for execution. */
struct ovs_conntrack_info {
	struct nf_conntrack_helper *helper;
	struct nf_conntrack_zone zone;
	struct nf_conn *ct;
	u8 commit : 1;
	u8 nat : 3;                 /* enum ovs_ct_nat */
	u8 force : 1;
	u8 have_eventmask : 1;
	u16 family;
	u32 eventmask;              /* Mask of 1 << IPCT_*. */
	struct md_mark mark;
	struct md_labels labels;
	char timeout[CTNL_TIMEOUT_NAME_MAX];
	struct nf_ct_timeout *nf_ct_timeout;
#if IS_ENABLED(CONFIG_NF_NAT)
	struct nf_nat_range2 range;  /* Only present for SRC NAT and DST NAT. */
#endif
};

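/* Note that the parsed ovs_conntrack_info is copied by value into the
 * flow's action list (see ovs_ct_copy_action() below), so it must remain a
 * flat, memcpy-safe blob; the pointers it holds (template ct, helper) carry
 * references that are dropped in __ovs_ct_free_action().
 */
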
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
#define OVS_CT_LIMIT_UNLIMITED	0
#define OVS_CT_LIMIT_DEFAULT	OVS_CT_LIMIT_UNLIMITED
#define CT_LIMIT_HASH_BUCKETS	512
static DEFINE_STATIC_KEY_FALSE(ovs_ct_limit_enabled);

struct ovs_ct_limit {
	/* Elements in ovs_ct_limit_info->limits hash table */
	struct hlist_node hlist_node;
	struct rcu_head rcu;
	u16 zone;
	u32 limit;
};

struct ovs_ct_limit_info {
	u32 default_limit;
	struct hlist_head *limits;
	struct nf_conncount_data *data;
};

static const struct nla_policy ct_limit_policy[OVS_CT_LIMIT_ATTR_MAX + 1] = {
	[OVS_CT_LIMIT_ATTR_ZONE_LIMIT] = { .type = NLA_NESTED, },
};
#endif

static bool labels_nonzero(const struct ovs_key_ct_labels *labels);

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);

static u16 key_to_nfproto(const struct sw_flow_key *key)
{
	switch (ntohs(key->eth.type)) {
	case ETH_P_IP:
		return NFPROTO_IPV4;
	case ETH_P_IPV6:
		return NFPROTO_IPV6;
	default:
		return NFPROTO_UNSPEC;
	}
}

/* Map SKB connection state into the values used by flow definition. */
static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
{
	u8 ct_state = OVS_CS_F_TRACKED;

	switch (ctinfo) {
	case IP_CT_ESTABLISHED_REPLY:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_REPLY_DIR;
		break;
	default:
		break;
	}

	switch (ctinfo) {
	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		ct_state |= OVS_CS_F_ESTABLISHED;
		break;
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		ct_state |= OVS_CS_F_RELATED;
		break;
	case IP_CT_NEW:
		ct_state |= OVS_CS_F_NEW;
		break;
	default:
		break;
	}

	return ct_state;
}

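/* Worked example: a reply packet of a related connection (e.g. an ICMP
 * error, ctinfo == IP_CT_RELATED_REPLY) hits both switches above and maps
 * to OVS_CS_F_TRACKED | OVS_CS_F_REPLY_DIR | OVS_CS_F_RELATED.
 */
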
static u32 ovs_ct_get_mark(const struct nf_conn *ct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	return ct ? READ_ONCE(ct->mark) : 0;
#else
	return 0;
#endif
}

/* Guard against conntrack labels max size shrinking below 128 bits. */
#if NF_CT_LABELS_MAX_SIZE < 16
#error NF_CT_LABELS_MAX_SIZE must be at least 16 bytes
#endif

static void ovs_ct_get_labels(const struct nf_conn *ct,
			      struct ovs_key_ct_labels *labels)
{
	struct nf_conn_labels *cl = NULL;

	if (ct) {
		if (ct->master && !nf_ct_is_confirmed(ct))
			ct = ct->master;
		cl = nf_ct_labels_find(ct);
	}
	if (cl)
		memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
	else
		memset(labels, 0, OVS_CT_LABELS_LEN);
}

static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key,
					const struct nf_conntrack_tuple *orig,
					u8 icmp_proto)
{
	key->ct_orig_proto = orig->dst.protonum;
	if (orig->dst.protonum == icmp_proto) {
		key->ct.orig_tp.src = htons(orig->dst.u.icmp.type);
		key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code);
	} else {
		key->ct.orig_tp.src = orig->src.u.all;
		key->ct.orig_tp.dst = orig->dst.u.all;
	}
}

static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
				const struct nf_conntrack_zone *zone,
				const struct nf_conn *ct)
{
	key->ct_state = state;
	key->ct_zone = zone->id;
	key->ct.mark = ovs_ct_get_mark(ct);
	ovs_ct_get_labels(ct, &key->ct.labels);

	if (ct) {
		const struct nf_conntrack_tuple *orig;

		/* Use the master if we have one. */
		if (ct->master)
			ct = ct->master;
		orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;

		/* IP version must match with the master connection. */
		if (key->eth.type == htons(ETH_P_IP) &&
		    nf_ct_l3num(ct) == NFPROTO_IPV4) {
			key->ipv4.ct_orig.src = orig->src.u3.ip;
			key->ipv4.ct_orig.dst = orig->dst.u3.ip;
			__ovs_ct_update_key_orig_tp(key, orig, IPPROTO_ICMP);
			return;
		} else if (key->eth.type == htons(ETH_P_IPV6) &&
			   !sw_flow_key_is_nd(key) &&
			   nf_ct_l3num(ct) == NFPROTO_IPV6) {
			key->ipv6.ct_orig.src = orig->src.u3.in6;
			key->ipv6.ct_orig.dst = orig->dst.u3.in6;
			__ovs_ct_update_key_orig_tp(key, orig, NEXTHDR_ICMP);
			return;
		}
	}
	/* Clear 'ct_orig_proto' to mark the non-existence of conntrack
	 * original direction key fields.
	 */
	key->ct_orig_proto = 0;
}

/* Update 'key' based on skb->_nfct.  If 'post_ct' is true, then OVS has
 * previously sent the packet to conntrack via the ct action.  If
 * 'keep_nat_flags' is true, the existing NAT flags are retained, else they
 * are initialized from the connection status.
 */
static void ovs_ct_update_key(const struct sk_buff *skb,
			      const struct ovs_conntrack_info *info,
			      struct sw_flow_key *key, bool post_ct,
			      bool keep_nat_flags)
{
	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	u8 state = 0;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		state = ovs_ct_get_state(ctinfo);
		/* All unconfirmed entries are NEW connections. */
		if (!nf_ct_is_confirmed(ct))
			state |= OVS_CS_F_NEW;
		/* OVS persists the related flag for the duration of the
		 * connection.
		 */
		if (ct->master)
			state |= OVS_CS_F_RELATED;
		if (keep_nat_flags) {
			state |= key->ct_state & OVS_CS_F_NAT_MASK;
		} else {
			if (ct->status & IPS_SRC_NAT)
				state |= OVS_CS_F_SRC_NAT;
			if (ct->status & IPS_DST_NAT)
				state |= OVS_CS_F_DST_NAT;
		}
		zone = nf_ct_zone(ct);
	} else if (post_ct) {
		state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
		if (info)
			zone = &info->zone;
	}
	__ovs_ct_update_key(key, state, zone, ct);
}

/* This is called to initialize CT key fields possibly coming in from the
 * local stack.
 */
void ovs_ct_fill_key(const struct sk_buff *skb,
		     struct sw_flow_key *key,
		     bool post_ct)
{
	ovs_ct_update_key(skb, NULL, key, post_ct, false);
}

int ovs_ct_put_key(const struct sw_flow_key *swkey,
		   const struct sw_flow_key *output, struct sk_buff *skb)
{
	if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark))
		return -EMSGSIZE;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels),
		    &output->ct.labels))
		return -EMSGSIZE;

	if (swkey->ct_orig_proto) {
		if (swkey->eth.type == htons(ETH_P_IP)) {
			struct ovs_key_ct_tuple_ipv4 orig;

			memset(&orig, 0, sizeof(orig));
			orig.ipv4_src = output->ipv4.ct_orig.src;
			orig.ipv4_dst = output->ipv4.ct_orig.dst;
			orig.src_port = output->ct.orig_tp.src;
			orig.dst_port = output->ct.orig_tp.dst;
			orig.ipv4_proto = output->ct_orig_proto;

			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
			struct ovs_key_ct_tuple_ipv6 orig;

			memset(&orig, 0, sizeof(orig));
			memcpy(orig.ipv6_src,
			       output->ipv6.ct_orig.src.s6_addr32,
			       sizeof(orig.ipv6_src));
			memcpy(orig.ipv6_dst,
			       output->ipv6.ct_orig.dst.s6_addr32,
			       sizeof(orig.ipv6_dst));
			orig.src_port = output->ct.orig_tp.src;
			orig.dst_port = output->ct.orig_tp.dst;
			orig.ipv6_proto = output->ct_orig_proto;

			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
				    sizeof(orig), &orig))
				return -EMSGSIZE;
		}
	}

	return 0;
}

static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
			   u32 ct_mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	new_mark = ct_mark | (READ_ONCE(ct->mark) & ~(mask));
	if (READ_ONCE(ct->mark) != new_mark) {
		WRITE_ONCE(ct->mark, new_mark);
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
		key->ct.mark = new_mark;
	}

	return 0;
#else
	return -ENOTSUPP;
#endif
}

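/* Masked-write example: with the current mark 0x1234, ct_mark 0x05 and
 * mask 0xff, new_mark = 0x05 | (0x1234 & ~0xff) = 0x1205; only the bits
 * selected by the mask change, the remaining mark bits are preserved.
 */
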
static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct)
{
	struct nf_conn_labels *cl;

	cl = nf_ct_labels_find(ct);
	if (!cl) {
		nf_ct_labels_ext_add(ct);
		cl = nf_ct_labels_find(ct);
	}

	return cl;
}

/* Initialize labels for a new, yet to be committed conntrack entry.  Note
 * that since the new connection is not yet confirmed, and thus no-one else
 * has access to its labels, we simply write them over.
 */
static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key,
			      const struct ovs_key_ct_labels *labels,
			      const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl, *master_cl;
	bool have_mask = labels_nonzero(mask);

	/* Inherit master's labels to the related connection? */
	master_cl = ct->master ? nf_ct_labels_find(ct->master) : NULL;

	if (!master_cl && !have_mask)
		return 0;   /* Nothing to do. */

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	/* Inherit the master's labels, if any. */
	if (master_cl)
		*cl = *master_cl;

	if (have_mask) {
		u32 *dst = (u32 *)cl->bits;
		int i;

		for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
			dst[i] = (dst[i] & ~mask->ct_labels_32[i]) |
				 (labels->ct_labels_32[i]
				  & mask->ct_labels_32[i]);
	}

	/* Labels are included in the IPCTNL_MSG_CT_NEW event only if the
	 * IPCT_LABEL bit is set in the event cache.
	 */
	nf_conntrack_event_cache(IPCT_LABEL, ct);

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}

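/* The per-word loop above is the 128-bit analogue of the ct_mark update:
 * dst = (dst & ~mask) | (labels & mask), applied to each of the
 * OVS_CT_LABELS_LEN_32 32-bit words of the label.
 */
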
static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
			     const struct ovs_key_ct_labels *labels,
			     const struct ovs_key_ct_labels *mask)
{
	struct nf_conn_labels *cl;
	int err;

	cl = ovs_ct_get_conn_labels(ct);
	if (!cl)
		return -ENOSPC;

	err = nf_connlabels_replace(ct, labels->ct_labels_32,
				    mask->ct_labels_32,
				    OVS_CT_LABELS_LEN_32);
	if (err)
		return err;

	memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);

	return 0;
}

static int ovs_ct_handle_fragments(struct net *net, struct sw_flow_key *key,
				   u16 zone, int family, struct sk_buff *skb)
{
	struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
	int err;

	err = nf_ct_handle_fragments(net, skb, zone, family, &key->ip.proto,
				     &ovs_cb.mru);
	if (err)
		return err;

	/* The key extracted from the fragment that completed this datagram
	 * likely didn't have an L4 header, so regenerate it.
	 */
	ovs_flow_key_update_l3l4(skb, key);
	key->ip.frag = OVS_FRAG_TYPE_NONE;
	*OVS_CB(skb) = ovs_cb;

	return 0;
}

/* This replicates logic from nf_conntrack_core.c that is not exported. */
static enum ip_conntrack_info
ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h)
{
	const struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
		return IP_CT_ESTABLISHED_REPLY;
	/* Once we've had two-way communication, always ESTABLISHED. */
	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		return IP_CT_ESTABLISHED;
	if (test_bit(IPS_EXPECTED_BIT, &ct->status))
		return IP_CT_RELATED;
	return IP_CT_NEW;
}

/* Find an existing connection which this packet belongs to without
 * re-attributing statistics or modifying the connection state.  This allows
 * an skb->_nfct lost due to an upcall to be recovered during actions
 * execution.
 *
 * Must be called with rcu_read_lock.
 *
 * On success, populates skb->_nfct and returns the connection.  Returns NULL
 * if there is no existing entry.
 */
static struct nf_conn *
ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
		     u8 l3num, struct sk_buff *skb, bool natted)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), l3num,
			       net, &tuple)) {
		pr_debug("ovs_ct_find_existing: Can't get tuple\n");
		return NULL;
	}

	/* Must invert the tuple if skb has been transformed by NAT. */
	if (natted) {
		struct nf_conntrack_tuple inverse;

		if (!nf_ct_invert_tuple(&inverse, &tuple)) {
			pr_debug("ovs_ct_find_existing: Inversion failed!\n");
			return NULL;
		}
		tuple = inverse;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(net, zone, &tuple);
	if (!h)
		return NULL;   /* Not found. */

	ct = nf_ct_tuplehash_to_ctrack(h);

	/* Inverted packet tuple matches the reverse direction conntrack tuple,
	 * select the other tuplehash to get the right 'ctinfo' bits for this
	 * packet.
	 */
	if (natted)
		h = &ct->tuplehash[!h->tuple.dst.dir];

	nf_ct_set(skb, ct, ovs_ct_get_info(h));
	return ct;
}

static
struct nf_conn *ovs_ct_executed(struct net *net,
				const struct sw_flow_key *key,
				const struct ovs_conntrack_info *info,
				struct sk_buff *skb,
				bool *ct_executed)
{
	struct nf_conn *ct = NULL;

	/* If no ct, check if we have evidence that an existing conntrack entry
	 * might be found for this skb.  This happens when we lose a skb->_nfct
	 * due to an upcall, or if the direction is being forced.  If the
	 * connection was not confirmed, it is not cached and needs to be run
	 * through conntrack again.
	 */
	*ct_executed = (key->ct_state & OVS_CS_F_TRACKED) &&
		       !(key->ct_state & OVS_CS_F_INVALID) &&
		       (key->ct_zone == info->zone.id);

	if (*ct_executed || (!key->ct_state && info->force)) {
		ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
					  !!(key->ct_state &
					     OVS_CS_F_NAT_MASK));
	}

	return ct;
}

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool skb_nfct_cached(struct net *net,
			    const struct sw_flow_key *key,
			    const struct ovs_conntrack_info *info,
			    struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	bool ct_executed = true;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		ct = ovs_ct_executed(net, key, info, skb, &ct_executed);

	if (ct)
		nf_ct_get(skb, &ctinfo);
	else
		return false;

	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
		return false;
	if (info->helper) {
		struct nf_conn_help *help;

		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
		if (help && rcu_access_pointer(help->helper) != info->helper)
			return false;
	}
	if (info->nf_ct_timeout) {
		struct nf_conn_timeout *timeout_ext;

		timeout_ext = nf_ct_timeout_find(ct);
		if (!timeout_ext || info->nf_ct_timeout !=
		    rcu_dereference(timeout_ext->timeout))
			return false;
	}
	/* Force conntrack entry direction to the current packet? */
	if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		/* Delete the conntrack entry if confirmed, else just release
		 * the reference.
		 */
		if (nf_ct_is_confirmed(ct))
			nf_ct_delete(ct, 0, 0);

		nf_ct_put(ct);
		nf_ct_set(skb, NULL, 0);
		return false;
	}

	return ct_executed;
}

#if IS_ENABLED(CONFIG_NF_NAT)
static void ovs_nat_update_key(struct sw_flow_key *key,
			       const struct sk_buff *skb,
			       enum nf_nat_manip_type maniptype)
{
	if (maniptype == NF_NAT_MANIP_SRC) {
		__be16 src;

		key->ct_state |= OVS_CS_F_SRC_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.src = ip_hdr(skb)->saddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.src, &ipv6_hdr(skb)->saddr,
			       sizeof(key->ipv6.addr.src));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			src = udp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_TCP)
			src = tcp_hdr(skb)->source;
		else if (key->ip.proto == IPPROTO_SCTP)
			src = sctp_hdr(skb)->source;
		else
			return;

		key->tp.src = src;
	} else {
		__be16 dst;

		key->ct_state |= OVS_CS_F_DST_NAT;
		if (key->eth.type == htons(ETH_P_IP))
			key->ipv4.addr.dst = ip_hdr(skb)->daddr;
		else if (key->eth.type == htons(ETH_P_IPV6))
			memcpy(&key->ipv6.addr.dst, &ipv6_hdr(skb)->daddr,
			       sizeof(key->ipv6.addr.dst));
		else
			return;

		if (key->ip.proto == IPPROTO_UDP)
			dst = udp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_TCP)
			dst = tcp_hdr(skb)->dest;
		else if (key->ip.proto == IPPROTO_SCTP)
			dst = sctp_hdr(skb)->dest;
		else
			return;

		key->tp.dst = dst;
	}
}

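/* The flow key must be refreshed after NAT so that any subsequent actions
 * in the same flow (set-field, recirculation, output) see the packet as it
 * now is on the wire, not its pre-translation addresses and ports.
 */
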
/* Returns NF_DROP if the packet should be dropped, NF_ACCEPT otherwise. */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	int err, action = 0;

	if (!(info->nat & OVS_CT_NAT))
		return NF_ACCEPT;
	if (info->nat & OVS_CT_SRC_NAT)
		action |= BIT(NF_NAT_MANIP_SRC);
	if (info->nat & OVS_CT_DST_NAT)
		action |= BIT(NF_NAT_MANIP_DST);

	err = nf_ct_nat(skb, ct, ctinfo, &action, &info->range, info->commit);
	if (err != NF_ACCEPT)
		return err;

	if (action & BIT(NF_NAT_MANIP_SRC))
		ovs_nat_update_key(key, skb, NF_NAT_MANIP_SRC);
	if (action & BIT(NF_NAT_MANIP_DST))
		ovs_nat_update_key(key, skb, NF_NAT_MANIP_DST);

	return err;
}
#else /* !CONFIG_NF_NAT */
static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
		      const struct ovs_conntrack_info *info,
		      struct sk_buff *skb, struct nf_conn *ct,
		      enum ip_conntrack_info ctinfo)
{
	return NF_ACCEPT;
}
#endif

static int verdict_to_errno(unsigned int verdict)
{
	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
		return 0;
	case NF_DROP:
		return -EINVAL;
	case NF_STOLEN:
		return -EINPROGRESS;
	default:
		break;
	}

	return -EINVAL;
}

/* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
 * not done already.  Update key with new CT state after passing the packet
 * through conntrack.
 * Note that if the packet is deemed invalid by conntrack, skb->_nfct will be
 * set to NULL and 0 will be returned.
 */
static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			   const struct ovs_conntrack_info *info,
			   struct sk_buff *skb)
{
	/* If we are recirculating packets to match on conntrack fields and
	 * committing with a separate conntrack action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	bool cached = skb_nfct_cached(net, key, info, skb);
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	if (!cached) {
		struct nf_hook_state state = {
			.hook = NF_INET_PRE_ROUTING,
			.pf = info->family,
			.net = net,
		};
		struct nf_conn *tmpl = info->ct;
		int err;

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			nf_ct_put(ct);
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			return verdict_to_errno(err);

		/* Clear CT state NAT flags to mark that we have not yet done
		 * NAT after the nf_conntrack_in() call.  We can actually clear
		 * the whole state, as it will be re-initialized below.
		 */
		key->ct_state = 0;

		/* Update the key, but keep the NAT flags. */
		ovs_ct_update_key(skb, info, key, true, true);
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (ct) {
		bool add_helper = false;

		/* Packets starting a new connection must be NATted before the
		 * helper, so that the helper knows about the NAT.  We enforce
		 * this by delaying both NAT and helper calls for unconfirmed
		 * connections until the committing CT action.  For later
		 * packets NAT and Helper may be called in either order.
		 *
		 * NAT will be done only if the CT action has NAT, and only
		 * once per packet (per zone), as guarded by the NAT bits in
		 * the key->ct_state.
		 */
		if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
		    (nf_ct_is_confirmed(ct) || info->commit)) {
			int err = ovs_ct_nat(net, key, info, skb, ct, ctinfo);

			err = verdict_to_errno(err);
			if (err)
				return err;
		}

		/* Userspace may decide to perform a ct lookup without a helper
		 * specified followed by a (recirculate and) commit with one,
		 * or attach a helper in a later commit.  Therefore, for
		 * connections which we will commit, we may need to attach
		 * the helper here.
		 */
		if (!nf_ct_is_confirmed(ct) && info->commit &&
		    info->helper && !nfct_help(ct)) {
			int err = __nf_ct_try_assign_helper(ct, info->ct,
							    GFP_ATOMIC);
			if (err)
				return err;
			add_helper = true;

			/* helper installed, add seqadj if NAT is required */
			if (info->nat && !nfct_seqadj(ct)) {
				if (!nfct_seqadj_ext_add(ct))
					return -EINVAL;
			}
		}

		/* Call the helper only if:
		 * - nf_conntrack_in() was executed above ("!cached") or a
		 *   helper was just attached ("add_helper") for a confirmed
		 *   connection, or
		 * - When committing an unconfirmed connection.
		 */
		if ((nf_ct_is_confirmed(ct) ? !cached || add_helper :
					      info->commit)) {
			int err = nf_ct_helper(skb, ct, ctinfo, info->family);

			err = verdict_to_errno(err);
			if (err)
				return err;
		}

		if (nf_ct_protonum(ct) == IPPROTO_TCP &&
		    nf_ct_is_confirmed(ct) &&
		    nf_conntrack_tcp_established(ct)) {
			/* Be liberal for tcp packets so that out-of-window
			 * packets are not marked invalid.
			 */
			nf_ct_set_tcp_be_liberal(ct);
		}

		nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
	}

	return 0;
}

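/* A typical datapath pipeline splits this into two passes: a plain ct()
 * action populates the key and recirculates, and a matching flow then runs
 * ct(commit) on the same zone.  skb_nfct_cached() is what lets the second
 * pass skip a full nf_conntrack_in() round trip.
 */
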
/* Lookup connection and read fields into key. */
static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	struct nf_conn *ct;
	int err;

	err = __ovs_ct_lookup(net, key, info, skb);
	if (err)
		return err;

	ct = (struct nf_conn *)skb_nfct(skb);
	if (ct)
		nf_ct_deliver_cached_events(ct);

	return 0;
}

static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
{
	size_t i;

	for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
		if (labels->ct_labels_32[i])
			return true;

	return false;
}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static struct hlist_head *ct_limit_hash_bucket(
	const struct ovs_ct_limit_info *info, u16 zone)
{
	return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
}

/* Call with ovs_mutex */
static void ct_limit_set(const struct ovs_ct_limit_info *info,
			 struct ovs_ct_limit *new_ct_limit)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, new_ct_limit->zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == new_ct_limit->zone) {
			hlist_replace_rcu(&ct_limit->hlist_node,
					  &new_ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}

	hlist_add_head_rcu(&new_ct_limit->hlist_node, head);
}

/* Call with ovs_mutex */
static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;
	struct hlist_node *n;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_safe(ct_limit, n, head, hlist_node) {
		if (ct_limit->zone == zone) {
			hlist_del_rcu(&ct_limit->hlist_node);
			kfree_rcu(ct_limit, rcu);
			return;
		}
	}
}

/* Call with RCU read lock */
static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;

	head = ct_limit_hash_bucket(info, zone);
	hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
		if (ct_limit->zone == zone)
			return ct_limit->limit;
	}

	return info->default_limit;
}

static int ovs_ct_check_limit(struct net *net,
			      const struct ovs_conntrack_info *info,
			      const struct nf_conntrack_tuple *tuple)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	const struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	u32 per_zone_limit, connections;
	u32 conncount_key;

	conncount_key = info->zone.id;

	per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
	if (per_zone_limit == OVS_CT_LIMIT_UNLIMITED)
		return 0;

	connections = nf_conncount_count(net, ct_limit_info->data,
					 &conncount_key, tuple, &info->zone);
	if (connections > per_zone_limit)
		return -ENOMEM;

	return 0;
}
#endif

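/* Note: nf_conncount_count() both counts and garbage-collects the per-zone
 * connection list, and this check is not atomic with the commit that
 * follows, so the configured limit is enforced on a best-effort basis.
 */
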
/* Lookup connection and confirm if unconfirmed. */
static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
			 const struct ovs_conntrack_info *info,
			 struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err;

	err = __ovs_ct_lookup(net, key, info, skb);
	if (err)
		return err;

	/* The connection could be invalid, in which case this is a no-op. */
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return 0;

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	if (static_branch_unlikely(&ovs_ct_limit_enabled)) {
		if (!nf_ct_is_confirmed(ct)) {
			err = ovs_ct_check_limit(net, info,
				&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
			if (err) {
				net_warn_ratelimited("openvswitch: zone: %u "
					"exceeds conntrack limit\n",
					info->zone.id);
				return err;
			}
		}
	}
#endif

	/* Set the conntrack event mask if given.  NEW and DELETE events have
	 * their own groups, but the NFNLGRP_CONNTRACK_UPDATE group listener
	 * typically would receive many kinds of updates.  Setting the event
	 * mask allows those events to be filtered.  The set event mask will
	 * remain in effect for the lifetime of the connection unless changed
	 * by a further CT action with both the commit flag and the eventmask
	 * option.
	 */
	if (info->have_eventmask) {
		struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct);

		if (cache)
			cache->ctmask = info->eventmask;
	}

	/* Apply changes before confirming the connection so that the initial
	 * conntrack NEW netlink event carries the values given in the CT
	 * action.
	 */
	if (info->mark.mask) {
		err = ovs_ct_set_mark(ct, key, info->mark.value,
				      info->mark.mask);
		if (err)
			return err;
	}
	if (!nf_ct_is_confirmed(ct)) {
		err = ovs_ct_init_labels(ct, key, &info->labels.value,
					 &info->labels.mask);
		if (err)
			return err;

		nf_conn_act_ct_ext_add(skb, ct, ctinfo);
	} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
		   labels_nonzero(&info->labels.mask)) {
		err = ovs_ct_set_labels(ct, key, &info->labels.value,
					&info->labels.mask);
		if (err)
			return err;
	}
	/* This will take care of sending queued events even if the connection
	 * is already confirmed.
	 */
	err = nf_conntrack_confirm(skb);
	return verdict_to_errno(err);
}

/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
int ovs_ct_execute(struct net *net, struct sk_buff *skb,
		   struct sw_flow_key *key,
		   const struct ovs_conntrack_info *info)
{
	int nh_ofs;
	int err;

	/* The conntrack module expects to be working at L3. */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);

	err = nf_ct_skb_network_trim(skb, info->family);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
		err = ovs_ct_handle_fragments(net, key, info->zone.id,
					      info->family, skb);
		if (err)
			return err;
	}

	if (info->commit)
		err = ovs_ct_commit(net, key, info, skb);
	else
		err = ovs_ct_lookup(net, key, info, skb);

	/* conntrack core returned NF_STOLEN */
	if (err == -EINPROGRESS)
		return err;

	skb_push_rcsum(skb, nh_ofs);
	if (err)
		ovs_kfree_skb_reason(skb, OVS_DROP_CONNTRACK);

	return err;
}

int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);

	nf_ct_put(ct);
	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

	if (key)
		ovs_ct_fill_key(skb, key, false);

	return 0;
}

#if IS_ENABLED(CONFIG_NF_NAT)
static int parse_nat(const struct nlattr *attr,
		     struct ovs_conntrack_info *info, bool log)
{
	struct nlattr *a;
	int rem;
	bool have_ip_max = false;
	bool have_proto_max = false;
	bool ip_vers = (info->family == NFPROTO_IPV6);

	nla_for_each_nested(a, attr, rem) {
		static const int ovs_nat_attr_lens[OVS_NAT_ATTR_MAX + 1][2] = {
			[OVS_NAT_ATTR_SRC] = {0, 0},
			[OVS_NAT_ATTR_DST] = {0, 0},
			[OVS_NAT_ATTR_IP_MIN] = {sizeof(struct in_addr),
						 sizeof(struct in6_addr)},
			[OVS_NAT_ATTR_IP_MAX] = {sizeof(struct in_addr),
						 sizeof(struct in6_addr)},
			[OVS_NAT_ATTR_PROTO_MIN] = {sizeof(u16), sizeof(u16)},
			[OVS_NAT_ATTR_PROTO_MAX] = {sizeof(u16), sizeof(u16)},
			[OVS_NAT_ATTR_PERSISTENT] = {0, 0},
			[OVS_NAT_ATTR_PROTO_HASH] = {0, 0},
			[OVS_NAT_ATTR_PROTO_RANDOM] = {0, 0},
		};
		int type = nla_type(a);

		if (type > OVS_NAT_ATTR_MAX) {
			OVS_NLERR(log, "Unknown NAT attribute (type=%d, max=%d)",
				  type, OVS_NAT_ATTR_MAX);
			return -EINVAL;
		}

		if (nla_len(a) != ovs_nat_attr_lens[type][ip_vers]) {
			OVS_NLERR(log, "NAT attribute type %d has unexpected length (%d != %d)",
				  type, nla_len(a),
				  ovs_nat_attr_lens[type][ip_vers]);
			return -EINVAL;
		}

		switch (type) {
		case OVS_NAT_ATTR_SRC:
		case OVS_NAT_ATTR_DST:
			if (info->nat) {
				OVS_NLERR(log, "Only one type of NAT may be specified");
				return -ERANGE;
			}
			info->nat |= OVS_CT_NAT;
			info->nat |= ((type == OVS_NAT_ATTR_SRC)
					? OVS_CT_SRC_NAT : OVS_CT_DST_NAT);
			break;

		case OVS_NAT_ATTR_IP_MIN:
			nla_memcpy(&info->range.min_addr, a,
				   sizeof(info->range.min_addr));
			info->range.flags |= NF_NAT_RANGE_MAP_IPS;
			break;

		case OVS_NAT_ATTR_IP_MAX:
			have_ip_max = true;
			nla_memcpy(&info->range.max_addr, a,
				   sizeof(info->range.max_addr));
			info->range.flags |= NF_NAT_RANGE_MAP_IPS;
			break;

		case OVS_NAT_ATTR_PROTO_MIN:
			info->range.min_proto.all = htons(nla_get_u16(a));
			info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
			break;

		case OVS_NAT_ATTR_PROTO_MAX:
			have_proto_max = true;
			info->range.max_proto.all = htons(nla_get_u16(a));
			info->range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
			break;

		case OVS_NAT_ATTR_PERSISTENT:
			info->range.flags |= NF_NAT_RANGE_PERSISTENT;
			break;

		case OVS_NAT_ATTR_PROTO_HASH:
			info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM;
			break;

		case OVS_NAT_ATTR_PROTO_RANDOM:
			info->range.flags |= NF_NAT_RANGE_PROTO_RANDOM_FULLY;
			break;

		default:
			OVS_NLERR(log, "Unknown nat attribute (%d)", type);
			return -EINVAL;
		}
	}

	if (rem > 0) {
		OVS_NLERR(log, "NAT attribute has %d unknown bytes", rem);
		return -EINVAL;
	}
	if (!info->nat) {
		/* Do not allow flags if no type is given. */
		if (info->range.flags) {
			OVS_NLERR(log,
				  "NAT flags may be given only when NAT range (SRC or DST) is also specified."
				  );
			return -EINVAL;
		}
		info->nat = OVS_CT_NAT;   /* NAT existing connections. */
	} else if (!info->commit) {
		OVS_NLERR(log,
			  "NAT attributes may be specified only when CT COMMIT flag is also specified."
			  );
		return -EINVAL;
	}
	/* Allow missing IP_MAX. */
	if (info->range.flags & NF_NAT_RANGE_MAP_IPS && !have_ip_max) {
		memcpy(&info->range.max_addr, &info->range.min_addr,
		       sizeof(info->range.max_addr));
	}
	/* Allow missing PROTO_MAX. */
	if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
	    !have_proto_max)
		info->range.max_proto.all = info->range.min_proto.all;

	return 0;
}
#endif

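/* In OVS flow syntax this parses actions such as
 * ct(commit,nat(src=10.0.0.240-10.0.0.254:32768-65535,persistent)):
 * src/dst selects the bits in info->nat, the address and port bounds fill
 * info->range, and the trailing options map to NF_NAT_RANGE_* flags.
 */
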
static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
	[OVS_CT_ATTR_COMMIT]	= { .minlen = 0, .maxlen = 0 },
	[OVS_CT_ATTR_FORCE_COMMIT]	= { .minlen = 0, .maxlen = 0 },
	[OVS_CT_ATTR_ZONE]	= { .minlen = sizeof(u16),
				    .maxlen = sizeof(u16) },
	[OVS_CT_ATTR_MARK]	= { .minlen = sizeof(struct md_mark),
				    .maxlen = sizeof(struct md_mark) },
	[OVS_CT_ATTR_LABELS]	= { .minlen = sizeof(struct md_labels),
				    .maxlen = sizeof(struct md_labels) },
	[OVS_CT_ATTR_HELPER]	= { .minlen = 1,
				    .maxlen = NF_CT_HELPER_NAME_LEN },
#if IS_ENABLED(CONFIG_NF_NAT)
	/* NAT length is checked when parsing the nested attributes. */
	[OVS_CT_ATTR_NAT]	= { .minlen = 0, .maxlen = INT_MAX },
#endif
	[OVS_CT_ATTR_EVENTMASK]	= { .minlen = sizeof(u32),
				    .maxlen = sizeof(u32) },
	[OVS_CT_ATTR_TIMEOUT]	= { .minlen = 1,
				    .maxlen = CTNL_TIMEOUT_NAME_MAX },
};

static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
		    const char **helper, bool log)
{
	struct nlattr *a;
	int rem;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int maxlen;
		int minlen;

		if (type > OVS_CT_ATTR_MAX) {
			OVS_NLERR(log,
				  "Unknown conntrack attr (type=%d, max=%d)",
				  type, OVS_CT_ATTR_MAX);
			return -EINVAL;
		}

		maxlen = ovs_ct_attr_lens[type].maxlen;
		minlen = ovs_ct_attr_lens[type].minlen;
		if (nla_len(a) < minlen || nla_len(a) > maxlen) {
			OVS_NLERR(log,
				  "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
				  type, nla_len(a), maxlen);
			return -EINVAL;
		}

		switch (type) {
		case OVS_CT_ATTR_FORCE_COMMIT:
			info->force = true;
			fallthrough;
		case OVS_CT_ATTR_COMMIT:
			info->commit = true;
			break;
#ifdef CONFIG_NF_CONNTRACK_ZONES
		case OVS_CT_ATTR_ZONE:
			info->zone.id = nla_get_u16(a);
			break;
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
		case OVS_CT_ATTR_MARK: {
			struct md_mark *mark = nla_data(a);

			if (!mark->mask) {
				OVS_NLERR(log, "ct_mark mask cannot be 0");
				return -EINVAL;
			}
			info->mark = *mark;
			break;
		}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
		case OVS_CT_ATTR_LABELS: {
			struct md_labels *labels = nla_data(a);

			if (!labels_nonzero(&labels->mask)) {
				OVS_NLERR(log, "ct_labels mask cannot be 0");
				return -EINVAL;
			}
			info->labels = *labels;
			break;
		}
#endif
		case OVS_CT_ATTR_HELPER:
			*helper = nla_data(a);
			if (!string_is_terminated(*helper, nla_len(a))) {
				OVS_NLERR(log, "Invalid conntrack helper");
				return -EINVAL;
			}
			break;
#if IS_ENABLED(CONFIG_NF_NAT)
		case OVS_CT_ATTR_NAT: {
			int err = parse_nat(a, info, log);

			if (err)
				return err;
			break;
		}
#endif
		case OVS_CT_ATTR_EVENTMASK:
			info->have_eventmask = true;
			info->eventmask = nla_get_u32(a);
			break;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
		case OVS_CT_ATTR_TIMEOUT:
			memcpy(info->timeout, nla_data(a), nla_len(a));
			if (!string_is_terminated(info->timeout, nla_len(a))) {
				OVS_NLERR(log, "Invalid conntrack timeout");
				return -EINVAL;
			}
			break;
#endif
		default:
			OVS_NLERR(log, "Unknown conntrack attr (%d)",
				  type);
			return -EINVAL;
		}
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if (!info->commit && info->mark.mask) {
		OVS_NLERR(log,
			  "Setting conntrack mark requires 'commit' flag.");
		return -EINVAL;
	}
#endif
#ifdef CONFIG_NF_CONNTRACK_LABELS
	if (!info->commit && labels_nonzero(&info->labels.mask)) {
		OVS_NLERR(log,
			  "Setting conntrack labels requires 'commit' flag.");
		return -EINVAL;
	}
#endif
	if (rem > 0) {
		OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
		return -EINVAL;
	}

	return 0;
}

bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
{
	if (attr == OVS_KEY_ATTR_CT_STATE)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    attr == OVS_KEY_ATTR_CT_ZONE)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    attr == OVS_KEY_ATTR_CT_MARK)
		return true;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    attr == OVS_KEY_ATTR_CT_LABELS)
		return true;

	return false;
}

int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
		       const struct sw_flow_key *key,
		       struct sw_flow_actions **sfa, bool log)
{
	unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
	struct ovs_conntrack_info ct_info;
	const char *helper = NULL;
	u16 family;
	int err;

	family = key_to_nfproto(key);
	if (family == NFPROTO_UNSPEC) {
		OVS_NLERR(log, "ct family unspecified");
		return -EINVAL;
	}

	memset(&ct_info, 0, sizeof(ct_info));
	ct_info.family = family;

	nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
			NF_CT_DEFAULT_ZONE_DIR, 0);

	err = parse_ct(attr, &ct_info, &helper, log);
	if (err)
		return err;

	/* Set up template for tracking connections in specific zones. */
	ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
	if (!ct_info.ct) {
		OVS_NLERR(log, "Failed to allocate conntrack template");
		return -ENOMEM;
	}

	if (nf_connlabels_get(net, n_bits - 1)) {
		nf_ct_tmpl_free(ct_info.ct);
		OVS_NLERR(log, "Failed to set connlabel length");
		return -EOPNOTSUPP;
	}

	if (ct_info.timeout[0]) {
		if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
				      ct_info.timeout))
			OVS_NLERR(log,
				  "Failed to associate timeout policy '%s'",
				  ct_info.timeout);
		else
			ct_info.nf_ct_timeout = rcu_dereference(
				nf_ct_timeout_find(ct_info.ct)->timeout);
	}

	if (helper) {
		err = nf_ct_add_helper(ct_info.ct, helper, ct_info.family,
				       key->ip.proto, ct_info.nat,
				       &ct_info.helper);
		if (err) {
			OVS_NLERR(log, "Failed to add %s helper %d", helper, err);
			goto err_free_ct;
		}
	}

	err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
				 sizeof(ct_info), log);
	if (err)
		goto err_free_ct;

	if (ct_info.commit)
		__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
	return 0;
err_free_ct:
	__ovs_ct_free_action(&ct_info);
	return err;
}

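/* Every successful ovs_ct_copy_action() is balanced by ovs_ct_free_action()
 * when the flow is destroyed (or by the error path above): the helper
 * module reference, timeout policy and template ct are all released in
 * __ovs_ct_free_action().
 */
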
#if IS_ENABLED(CONFIG_NF_NAT)
static bool ovs_ct_nat_to_attr(const struct ovs_conntrack_info *info,
			       struct sk_buff *skb)
{
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, OVS_CT_ATTR_NAT);
	if (!start)
		return false;

	if (info->nat & OVS_CT_SRC_NAT) {
		if (nla_put_flag(skb, OVS_NAT_ATTR_SRC))
			return false;
	} else if (info->nat & OVS_CT_DST_NAT) {
		if (nla_put_flag(skb, OVS_NAT_ATTR_DST))
			return false;
	} else {
		goto out;
	}

	if (info->range.flags & NF_NAT_RANGE_MAP_IPS) {
		if (IS_ENABLED(CONFIG_NF_NAT) &&
		    info->family == NFPROTO_IPV4) {
			if (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MIN,
					    info->range.min_addr.ip) ||
			    (info->range.max_addr.ip
			     != info->range.min_addr.ip &&
			     (nla_put_in_addr(skb, OVS_NAT_ATTR_IP_MAX,
					      info->range.max_addr.ip))))
				return false;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   info->family == NFPROTO_IPV6) {
			if (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MIN,
					     &info->range.min_addr.in6) ||
			    (memcmp(&info->range.max_addr.in6,
				    &info->range.min_addr.in6,
				    sizeof(info->range.max_addr.in6)) &&
			     (nla_put_in6_addr(skb, OVS_NAT_ATTR_IP_MAX,
					       &info->range.max_addr.in6))))
				return false;
		} else {
			return false;
		}
	}
	if (info->range.flags & NF_NAT_RANGE_PROTO_SPECIFIED &&
	    (nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MIN,
			 ntohs(info->range.min_proto.all)) ||
	     (info->range.max_proto.all != info->range.min_proto.all &&
	      nla_put_u16(skb, OVS_NAT_ATTR_PROTO_MAX,
			  ntohs(info->range.max_proto.all)))))
		return false;

	if (info->range.flags & NF_NAT_RANGE_PERSISTENT &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PERSISTENT))
		return false;
	if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_HASH))
		return false;
	if (info->range.flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY &&
	    nla_put_flag(skb, OVS_NAT_ATTR_PROTO_RANDOM))
		return false;
out:
	nla_nest_end(skb, start);

	return true;
}
#endif

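/* This is the inverse of parse_nat(): it re-serializes the parsed NAT
 * configuration so that flow dumps return the same netlink attributes
 * userspace originally installed.
 */
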
int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
			  struct sk_buff *skb)
{
	struct nlattr *start;

	start = nla_nest_start_noflag(skb, OVS_ACTION_ATTR_CT);
	if (!start)
		return -EMSGSIZE;

	if (ct_info->commit && nla_put_flag(skb, ct_info->force
					    ? OVS_CT_ATTR_FORCE_COMMIT
					    : OVS_CT_ATTR_COMMIT))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && ct_info->mark.mask &&
	    nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
		    &ct_info->mark))
		return -EMSGSIZE;
	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    labels_nonzero(&ct_info->labels.mask) &&
	    nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
		    &ct_info->labels))
		return -EMSGSIZE;
	if (ct_info->helper) {
		if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
				   ct_info->helper->name))
			return -EMSGSIZE;
	}
	if (ct_info->have_eventmask &&
	    nla_put_u32(skb, OVS_CT_ATTR_EVENTMASK, ct_info->eventmask))
		return -EMSGSIZE;
	if (ct_info->timeout[0]) {
		if (nla_put_string(skb, OVS_CT_ATTR_TIMEOUT, ct_info->timeout))
			return -EMSGSIZE;
	}

#if IS_ENABLED(CONFIG_NF_NAT)
	if (ct_info->nat && !ovs_ct_nat_to_attr(ct_info, skb))
		return -EMSGSIZE;
#endif
	nla_nest_end(skb, start);

	return 0;
}

void ovs_ct_free_action(const struct nlattr *a)
{
	struct ovs_conntrack_info *ct_info = nla_data(a);

	__ovs_ct_free_action(ct_info);
}

static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
{
	if (ct_info->helper) {
#if IS_ENABLED(CONFIG_NF_NAT)
		if (ct_info->nat)
			nf_nat_helper_put(ct_info->helper);
#endif
		nf_conntrack_helper_put(ct_info->helper);
	}
	if (ct_info->ct) {
		if (ct_info->timeout[0])
			nf_ct_destroy_timeout(ct_info->ct);
		nf_connlabels_put(nf_ct_net(ct_info->ct));
		nf_ct_tmpl_free(ct_info->ct);
	}
}

#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
static int ovs_ct_limit_init(struct net *net, struct ovs_net *ovs_net)
{
	int i, err;

	ovs_net->ct_limit_info = kmalloc(sizeof(*ovs_net->ct_limit_info),
					 GFP_KERNEL);
	if (!ovs_net->ct_limit_info)
		return -ENOMEM;

	ovs_net->ct_limit_info->default_limit = OVS_CT_LIMIT_DEFAULT;
	ovs_net->ct_limit_info->limits =
		kmalloc_array(CT_LIMIT_HASH_BUCKETS, sizeof(struct hlist_head),
			      GFP_KERNEL);
	if (!ovs_net->ct_limit_info->limits) {
		kfree(ovs_net->ct_limit_info);
		return -ENOMEM;
	}

	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&ovs_net->ct_limit_info->limits[i]);

	ovs_net->ct_limit_info->data = nf_conncount_init(net, sizeof(u32));

	if (IS_ERR(ovs_net->ct_limit_info->data)) {
		err = PTR_ERR(ovs_net->ct_limit_info->data);
		kfree(ovs_net->ct_limit_info->limits);
		kfree(ovs_net->ct_limit_info);
		pr_err("openvswitch: failed to init nf_conncount %d\n", err);
		return err;
	}
	return 0;
}

static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
{
	const struct ovs_ct_limit_info *info = ovs_net->ct_limit_info;
	int i;

	nf_conncount_destroy(net, info->data);
	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
		struct hlist_head *head = &info->limits[i];
		struct ovs_ct_limit *ct_limit;
		struct hlist_node *next;

		hlist_for_each_entry_safe(ct_limit, next, head, hlist_node)
			kfree_rcu(ct_limit, rcu);
	}
	kfree(info->limits);
	kfree(info);
}

static struct sk_buff *
ovs_ct_limit_cmd_reply_start(struct genl_info *info, u8 cmd,
			     struct ovs_header **ovs_reply_header)
{
	struct ovs_header *ovs_header = genl_info_userhdr(info);
	struct sk_buff *skb;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	*ovs_reply_header = genlmsg_put(skb, info->snd_portid,
					info->snd_seq,
					&dp_ct_limit_genl_family, 0, cmd);

	if (!*ovs_reply_header) {
		nlmsg_free(skb);
		return ERR_PTR(-EMSGSIZE);
	}
	(*ovs_reply_header)->dp_ifindex = ovs_header->dp_ifindex;

	return skb;
}

static bool check_zone_id(int zone_id, u16 *pzone)
{
	if (zone_id >= 0 && zone_id <= 65535) {
		*pzone = (u16)zone_id;
		return true;
	}
	return false;
}

static int ovs_ct_limit_set_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = zone_limit->limit;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			struct ovs_ct_limit *ct_limit;

			ct_limit = kmalloc(sizeof(*ct_limit),
					   GFP_KERNEL_ACCOUNT);
			if (!ct_limit)
				return -ENOMEM;

			ct_limit->zone = zone;
			ct_limit->limit = zone_limit->limit;

			ovs_lock();
			ct_limit_set(info, ct_limit);
			ovs_unlock();
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "set zone limit has %d unknown bytes", rem);

	return 0;
}

static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info)
{
	struct ovs_zone_limit *zone_limit;
	int rem;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			ovs_lock();
			info->default_limit = OVS_CT_LIMIT_DEFAULT;
			ovs_unlock();
		} else if (unlikely(!check_zone_id(
				zone_limit->zone_id, &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			ovs_lock();
			ct_limit_del(info, zone);
			ovs_unlock();
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "del zone limit has %d unknown bytes", rem);

	return 0;
}

static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
					  struct sk_buff *reply)
{
	struct ovs_zone_limit zone_limit = {
		.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
		.limit   = info->default_limit,
	};

	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}

static int __ovs_ct_limit_get_zone_limit(struct net *net,
					 struct nf_conncount_data *data,
					 u16 zone_id, u32 limit,
					 struct sk_buff *reply)
{
	struct nf_conntrack_zone ct_zone;
	struct ovs_zone_limit zone_limit;
	u32 conncount_key = zone_id;

	zone_limit.zone_id = zone_id;
	zone_limit.limit = limit;
	nf_ct_zone_init(&ct_zone, zone_id, NF_CT_DEFAULT_ZONE_DIR, 0);

	zone_limit.count = nf_conncount_count(net, data, &conncount_key, NULL,
					      &ct_zone);
	return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
}

static int ovs_ct_limit_get_zone_limit(struct net *net,
				       struct nlattr *nla_zone_limit,
				       struct ovs_ct_limit_info *info,
				       struct sk_buff *reply)
{
	struct ovs_zone_limit *zone_limit;
	int rem, err;
	u32 limit;
	u16 zone;

	rem = NLA_ALIGN(nla_len(nla_zone_limit));
	zone_limit = (struct ovs_zone_limit *)nla_data(nla_zone_limit);

	while (rem >= sizeof(*zone_limit)) {
		if (unlikely(zone_limit->zone_id ==
				OVS_ZONE_LIMIT_DEFAULT_ZONE)) {
			err = ovs_ct_limit_get_default_limit(info, reply);
			if (err)
				return err;
		} else if (unlikely(!check_zone_id(zone_limit->zone_id,
						   &zone))) {
			OVS_NLERR(true, "zone id is out of range");
		} else {
			rcu_read_lock();
			limit = ct_limit_get(info, zone);
			rcu_read_unlock();

			err = __ovs_ct_limit_get_zone_limit(
				net, info->data, zone, limit, reply);
			if (err)
				return err;
		}
		rem -= NLA_ALIGN(sizeof(*zone_limit));
		zone_limit = (struct ovs_zone_limit *)((u8 *)zone_limit +
				NLA_ALIGN(sizeof(*zone_limit)));
	}

	if (rem)
		OVS_NLERR(true, "get zone limit has %d unknown bytes", rem);

	return 0;
}

static int ovs_ct_limit_get_all_zone_limit(struct net *net,
					   struct ovs_ct_limit_info *info,
					   struct sk_buff *reply)
{
	struct ovs_ct_limit *ct_limit;
	struct hlist_head *head;
	int i, err = 0;

	err = ovs_ct_limit_get_default_limit(info, reply);
	if (err)
		return err;

	rcu_read_lock();
	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
		head = &info->limits[i];
		hlist_for_each_entry_rcu(ct_limit, head, hlist_node) {
			err = __ovs_ct_limit_get_zone_limit(net, info->data,
				ct_limit->zone, ct_limit->limit, reply);
			if (err)
				goto exit_err;
		}
	}

exit_err:
	rcu_read_unlock();
	return err;
}

static int ovs_ct_limit_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_SET,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = -EINVAL;
		goto exit_err;
	}

	err = ovs_ct_limit_set_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
					  ct_limit_info);
	if (err)
		goto exit_err;

	static_branch_enable(&ovs_ct_limit_enabled);

	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}

static int ovs_ct_limit_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_DEL,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	if (!a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = -EINVAL;
		goto exit_err;
	}

	err = ovs_ct_limit_del_zone_limit(a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT],
					  ct_limit_info);
	if (err)
		goto exit_err;

	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}

static int ovs_ct_limit_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct nlattr *nla_reply;
	struct sk_buff *reply;
	struct ovs_header *ovs_reply_header;
	struct net *net = sock_net(skb->sk);
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
	struct ovs_ct_limit_info *ct_limit_info = ovs_net->ct_limit_info;
	int err;

	reply = ovs_ct_limit_cmd_reply_start(info, OVS_CT_LIMIT_CMD_GET,
					     &ovs_reply_header);
	if (IS_ERR(reply))
		return PTR_ERR(reply);

	nla_reply = nla_nest_start_noflag(reply, OVS_CT_LIMIT_ATTR_ZONE_LIMIT);
	if (!nla_reply) {
		err = -EMSGSIZE;
		goto exit_err;
	}

	if (a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT]) {
		err = ovs_ct_limit_get_zone_limit(
			net, a[OVS_CT_LIMIT_ATTR_ZONE_LIMIT], ct_limit_info,
			reply);
		if (err)
			goto exit_err;
	} else {
		err = ovs_ct_limit_get_all_zone_limit(net, ct_limit_info,
						      reply);
		if (err)
			goto exit_err;
	}

	nla_nest_end(reply, nla_reply);
	genlmsg_end(reply, ovs_reply_header);
	return genlmsg_reply(reply, info);

exit_err:
	nlmsg_free(reply);
	return err;
}

static const struct genl_small_ops ct_limit_genl_ops[] = {
	{ .cmd = OVS_CT_LIMIT_CMD_SET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN
					 * privilege.
					 */
	  .doit = ovs_ct_limit_cmd_set,
	},
	{ .cmd = OVS_CT_LIMIT_CMD_DEL,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN
					 * privilege.
					 */
	  .doit = ovs_ct_limit_cmd_del,
	},
	{ .cmd = OVS_CT_LIMIT_CMD_GET,
	  .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
	  .flags = 0,		  /* OK for unprivileged users. */
	  .doit = ovs_ct_limit_cmd_get,
	},
};

static const struct genl_multicast_group ovs_ct_limit_multicast_group = {
	.name = OVS_CT_LIMIT_MCGROUP,
};

struct genl_family dp_ct_limit_genl_family __ro_after_init = {
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_CT_LIMIT_FAMILY,
	.version = OVS_CT_LIMIT_VERSION,
	.maxattr = OVS_CT_LIMIT_ATTR_MAX,
	.policy = ct_limit_policy,
	.netnsok = true,
	.parallel_ops = true,
	.small_ops = ct_limit_genl_ops,
	.n_small_ops = ARRAY_SIZE(ct_limit_genl_ops),
	.resv_start_op = OVS_CT_LIMIT_CMD_GET + 1,
	.mcgrps = &ovs_ct_limit_multicast_group,
	.n_mcgrps = 1,
	.module = THIS_MODULE,
};
#endif

int ovs_ct_init(struct net *net)
{
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	return ovs_ct_limit_init(net, ovs_net);
#else
	return 0;
#endif
}

void ovs_ct_exit(struct net *net)
{
#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	ovs_ct_limit_exit(net, ovs_net);
#endif
}