// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>
static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};
static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}
/* The following nat helper functions check if the inverted reverse tuple
 * (target) is different than the current dir tuple - meaning nat for ports
 * and/or ip is needed, and add the relevant mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}
static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}
static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}
static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}
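
/* Fill a FLOW_ACTION_CT_METADATA entry so drivers restoring an
 * offloaded flow can recreate the software conntrack state on the
 * skb: mark, labels and a ct/ctinfo cookie (see
 * tcf_ct_flow_table_restore_skb() below).
 */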
static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = ct->mark;
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* aligns with the CT reference on the SKB nf_ct_set */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}
static int tcf_ct_flow_table_add_action_nat(struct net *net,
					    struct nf_conn *ct,
					    enum ip_conntrack_dir dir,
					    struct flow_action *action)
{
	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
	struct nf_conntrack_tuple target;

	if (!(ct->status & IPS_NAT_MASK))
		return 0;

	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);

	switch (tuple->src.l3num) {
	case NFPROTO_IPV4:
		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
						      action);
		break;
	case NFPROTO_IPV6:
		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
						      action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
		break;
	case IPPROTO_UDP:
		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}
static struct nf_flowtable_type flowtable_ct = {
	.action		= tcf_ct_flow_table_fill_actions,
	.owner		= THIS_MODULE,
};
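
/* Look up (or create) the flow table for params->zone. Tables are
 * shared by all ct actions in the same zone and reference counted,
 * so the underlying nf_flowtable is only torn down when the last
 * action in that zone goes away.
 */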
static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}
static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}
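
/* Offload an established connection into the zone's flow table.
 * IPS_OFFLOAD_BIT guards against adding the same conntrack entry
 * twice. For TCP, be liberal about window checking: packets won't
 * update the software state machine while the flow is offloaded.
 */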
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}
static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}
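
/* Parse an IPv4 packet into a flow table lookup tuple. Bail out on
 * fragments, IP options, expiring TTLs and anything that is not
 * plain TCP or UDP, since those must go through full conntrack.
 */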
static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}
static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
				   thoff + sizeof(struct tcphdr) :
				   thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}
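
/* Software fast path: try to match the packet against an offloaded
 * flow and, on a hit, attach the conntrack entry to the skb directly,
 * skipping the full nf_conntrack_in() lookup. FIN/RST tear the flow
 * down so TCP state tracking resumes in software.
 */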
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	/* Previously seen or loopback */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return false;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);

	return true;
}
static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};
/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}
/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;
	int err;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	err = pskb_trim_rcsum(skb, len);

	return err;
}
static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}
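
/* Reassemble IP fragments in place before the conntrack lookup,
 * since conntrack needs complete L4 headers. Returns -EINPROGRESS
 * while the defrag engine holds the skb waiting for more fragments.
 */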
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

	skb_get(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			goto out_free;
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}
static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}
#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (skb->protocol == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall thru to initialize if needed. */
		/* fall through */
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */
static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (ct->mark & ~(mask));
	if (ct->mark != new_mark) {
		ct->mark = new_mark;
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}
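
/* Apply the configured NAT to the packet. For established (or
 * committed related) connections the manip direction is derived from
 * the existing conntrack state; for new connections it comes from
 * the TCA_CT_ACT_NAT_SRC/DST flags. When both SNAT and DNAT apply,
 * run a second translation pass for the other direction.
 */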
static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;   /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT &&
	    ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
		if (maniptype == NF_NAT_MANIP_SRC)
			maniptype = NF_NAT_MANIP_DST;
		else
			maniptype = NF_NAT_MANIP_SRC;

		err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}
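
/* Main action datapath, much like the openvswitch ct() action:
 * either clear the skb's conntrack state, or attach state via the
 * flow table fast path or a full nf_conntrack_in() lookup, then NAT
 * and optionally commit the connection.
 */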
static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	if (clear) {
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3.
	 * We also try to pull the IPv4/6 header to linear area
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone);
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		nf_conntrack_confirm(skb);
	} else if (!skip_add) {
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);
	}

out_push:
	skb_push_rcsum(skb, nh_ofs);

out:
	tcf_action_update_bstats(&c->common, skb);
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}
static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = { .type = NLA_EXACT_LEN, .len = sizeof(struct tc_ct) },
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_IPV6_MAX] = { .type = NLA_EXACT_LEN,
				  .len = sizeof(struct in6_addr) },
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};
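
/* Example configuration (iproute2 syntax; a sketch only, exact flag
 * names may vary with the tc version):
 *
 *   tc filter add dev eth0 ingress proto ip flower \
 *     ct_state -trk action ct zone 1 pipe action goto chain 1
 *   tc filter add dev eth0 ingress proto ip chain 1 flower \
 *     ct_state +trk+new action ct commit zone 1 pipe action pass
 *   tc filter add dev eth0 ingress proto ip chain 1 flower \
 *     ct_state +trk+est action pass
 */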
static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}
static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

	if (p->zone == NF_CT_DEFAULT_ZONE_ID)
		return 0;

	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}
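
/* Netlink .init handler: parse the attributes into a fresh
 * tcf_ct_params, acquire the per-zone flow table, and swap the
 * parameters in under tcf_lock so the datapath always sees a
 * consistent snapshot via RCU.
 */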
static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
	if (res == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return res;

cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}
static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}
static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}
static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}
static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
			     u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, false, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}
static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.walk		=	tcf_ct_walker,
	.lookup		=	tcf_ct_search,
	.stats_update	=	tcf_stats_update,
	.size		=	sizeof(struct tcf_ct),
};
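
/* Per-netns setup: reserve enough connlabel bits for the full
 * 128-bit label area used by this action; record whether that
 * succeeded so label configuration can be rejected gracefully.
 */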
static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length\n");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};
static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}
static void __exit ct_cleanup_module(void)
{
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}
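
/* Used by drivers on the offload restore path: rebuild skb->_nfct
 * from the FLOW_ACTION_CT_METADATA cookie that was filled in by
 * tcf_ct_flow_table_add_action_meta().
 */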
void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie)
{
	enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK;
	struct nf_conn *ct;

	ct = (struct nf_conn *)(cookie & NFCT_PTRMASK);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
}
EXPORT_SYMBOL_GPL(tcf_ct_flow_table_restore_skb);
module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");