// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

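/* Per-zone flow table, shared (and refcounted) by all ct action instances
 * configured for the same conntrack zone. It holds established connections
 * offloaded to a software/hardware fast path, and lives in zones_ht keyed
 * by zone id.
 */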
struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helper functions check if the inverted reverse tuple
 * (target) is different than the current dir tuple - meaning NAT for ports
 * and/or IP is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

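/* Emit a FLOW_ACTION_CT_METADATA entry so a flow-table hit can restore the
 * conntrack state (mark, labels and the ct/ctinfo cookie) onto the skb
 * without another conntrack lookup.
 */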
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = ct->mark;
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

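/* .action callback of flowtable_ct: translate an offloaded flow into
 * flow_action entries (NAT mangles plus conntrack metadata) for the given
 * tuple direction. On failure, roll back any entries added here.
 */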
static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

static struct nf_flowtable_type flowtable_ct = {
	.action = tcf_ct_flow_table_fill_actions,
	.owner = THIS_MODULE,
};

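/* Take a reference on the zone's flow table, creating and registering it in
 * zones_ht on first use. Serialized by zones_mutex.
 */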
static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD |
			      NF_FLOWTABLE_COUNTER;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

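/* Deferred destruction of a zone flow table, run from act_ct_wq after an
 * RCU grace period so concurrent readers are done with it.
 */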
static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

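/* Offload a single connection into the zone's flow table. IPS_OFFLOAD_BIT
 * serves as the "already offloaded" marker and is cleared again on failure.
 */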
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

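/* Decide whether a tracked connection is suitable for offload: only
 * established TCP/UDP flows without helpers or sequence adjustment.
 */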
static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}

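/* The two helpers below build the flow-table lookup tuple from the skb.
 * They return false (skip the fast path) for fragments, for protocols
 * other than TCP/UDP, and when the TTL/hop limit is about to expire.
 */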
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

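/* Fast path: look the packet up in the zone's flow table and, on a hit,
 * attach the flow's conntrack entry to the skb, bypassing
 * nf_conntrack_in(). A TCP FIN/RST tears the offloaded flow down so
 * conntrack sees the connection close.
 */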
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	if (nf_ft->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

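/* Queue IP fragments for reassembly, preserving the qdisc cb across the
 * defrag call. Returns -EINPROGRESS when the fragment was consumed and the
 * full packet has not been reassembled yet.
 */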
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct qdisc_skb_cb cb;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);
	cb = *qdisc_skb_cb(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			cb.mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			cb.mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	*qdisc_skb_cb(skb) = cb;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before? This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

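/* Apply the action's NAT configuration to the packet: pick the manip
 * direction from conntrack state (or from the requested src/dst NAT for new
 * connections) and run it; if the connection is both SNATed and DNATed,
 * run the second translation as well.
 */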
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;   /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT &&
	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			maniptype = NF_NAT_MANIP_DST;
		else
			maniptype = NF_NAT_MANIP_SRC;

		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

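/* Main packet path of the ct action: defragment, run the packet through the
 * flow-table fast path or nf_conntrack_in(), apply NAT, and on commit
 * update mark/labels and confirm the connection.
 */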
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);

	if (clear) {
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		nf_conntrack_confirm(skb);
	} else if (!skip_add) {
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
	}

out_push:
	skb_push_rcsum(skb, nh_ofs);

out:
	tcf_action_update_bstats(&c->common, skb);
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
		return 0;

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

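/* Control-path create/replace of a ct action instance via netlink.
 *
 * For illustration only (device names are hypothetical and the exact
 * syntax depends on the iproute2 version), a rule set exercising this
 * action could look like:
 *
 *   tc filter add dev eth0 ingress prio 1 chain 0 proto ip flower \
 *	ct_state -trk action ct zone 2 pipe action goto chain 2
 *   tc filter add dev eth0 ingress prio 1 chain 2 proto ip flower \
 *	ct_state +trk+new action ct zone 2 commit pipe \
 *	action mirred egress redirect dev eth1
 */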
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind = "ct",
	.id = TCA_ID_CT,
	.owner = THIS_MODULE,
	.act = tcf_ct_act,
	.dump = tcf_ct_dump,
	.init = tcf_ct_init,
	.cleanup = tcf_ct_cleanup,
	.walk = tcf_ct_walker,
	.lookup = tcf_ct_search,
	.stats_update = tcf_stats_update,
	.size = sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

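/* Module init: the ordered workqueue handles deferred flow-table
 * destruction, and the zones hashtable backs the per-zone flow tables.
 */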
static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	static_branch_inc(&tcf_frag_xmit_count);

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	static_branch_dec(&tcf_frag_xmit_count);
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");