1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Copyright 2020 NXP */
4 #include <linux/module.h>
5 #include <linux/types.h>
6 #include <linux/kernel.h>
7 #include <linux/string.h>
8 #include <linux/errno.h>
9 #include <linux/skbuff.h>
10 #include <linux/rtnetlink.h>
11 #include <linux/init.h>
12 #include <linux/slab.h>
13 #include <net/act_api.h>
14 #include <net/netlink.h>
15 #include <net/pkt_cls.h>
16 #include <net/tc_act/tc_gate.h>
17 #include <net/tc_wrapper.h>
19 static struct tc_action_ops act_gate_ops
;
21 static ktime_t
gate_get_time(struct tcf_gate
*gact
)
23 ktime_t mono
= ktime_get();
25 switch (gact
->tk_offset
) {
29 return ktime_mono_to_any(mono
, gact
->tk_offset
);
35 static void gate_get_start_time(struct tcf_gate
*gact
, ktime_t
*start
)
37 struct tcf_gate_params
*param
= &gact
->param
;
38 ktime_t now
, base
, cycle
;
41 base
= ns_to_ktime(param
->tcfg_basetime
);
42 now
= gate_get_time(gact
);
44 if (ktime_after(base
, now
)) {
49 cycle
= param
->tcfg_cycletime
;
51 n
= div64_u64(ktime_sub_ns(now
, base
), cycle
);
52 *start
= ktime_add_ns(base
, (n
+ 1) * cycle
);
55 static void gate_start_timer(struct tcf_gate
*gact
, ktime_t start
)
59 expires
= hrtimer_get_expires(&gact
->hitimer
);
63 start
= min_t(ktime_t
, start
, expires
);
65 hrtimer_start(&gact
->hitimer
, start
, HRTIMER_MODE_ABS_SOFT
);
68 static enum hrtimer_restart
gate_timer_func(struct hrtimer
*timer
)
70 struct tcf_gate
*gact
= container_of(timer
, struct tcf_gate
,
72 struct tcf_gate_params
*p
= &gact
->param
;
73 struct tcfg_gate_entry
*next
;
74 ktime_t close_time
, now
;
76 spin_lock(&gact
->tcf_lock
);
78 next
= gact
->next_entry
;
80 /* cycle start, clear pending bit, clear total octets */
81 gact
->current_gate_status
= next
->gate_state
? GATE_ACT_GATE_OPEN
: 0;
82 gact
->current_entry_octets
= 0;
83 gact
->current_max_octets
= next
->maxoctets
;
85 gact
->current_close_time
= ktime_add_ns(gact
->current_close_time
,
88 close_time
= gact
->current_close_time
;
90 if (list_is_last(&next
->list
, &p
->entries
))
91 next
= list_first_entry(&p
->entries
,
92 struct tcfg_gate_entry
, list
);
94 next
= list_next_entry(next
, list
);
96 now
= gate_get_time(gact
);
98 if (ktime_after(now
, close_time
)) {
102 cycle
= p
->tcfg_cycletime
;
103 base
= ns_to_ktime(p
->tcfg_basetime
);
104 n
= div64_u64(ktime_sub_ns(now
, base
), cycle
);
105 close_time
= ktime_add_ns(base
, (n
+ 1) * cycle
);
108 gact
->next_entry
= next
;
110 hrtimer_set_expires(&gact
->hitimer
, close_time
);
112 spin_unlock(&gact
->tcf_lock
);
114 return HRTIMER_RESTART
;
117 TC_INDIRECT_SCOPE
int tcf_gate_act(struct sk_buff
*skb
,
118 const struct tc_action
*a
,
119 struct tcf_result
*res
)
121 struct tcf_gate
*gact
= to_gate(a
);
122 int action
= READ_ONCE(gact
->tcf_action
);
124 tcf_lastuse_update(&gact
->tcf_tm
);
125 tcf_action_update_bstats(&gact
->common
, skb
);
127 spin_lock(&gact
->tcf_lock
);
128 if (unlikely(gact
->current_gate_status
& GATE_ACT_PENDING
)) {
129 spin_unlock(&gact
->tcf_lock
);
133 if (!(gact
->current_gate_status
& GATE_ACT_GATE_OPEN
)) {
134 spin_unlock(&gact
->tcf_lock
);
138 if (gact
->current_max_octets
>= 0) {
139 gact
->current_entry_octets
+= qdisc_pkt_len(skb
);
140 if (gact
->current_entry_octets
> gact
->current_max_octets
) {
141 spin_unlock(&gact
->tcf_lock
);
145 spin_unlock(&gact
->tcf_lock
);
150 tcf_action_inc_overlimit_qstats(&gact
->common
);
152 tcf_action_inc_drop_qstats(&gact
->common
);
156 static const struct nla_policy entry_policy
[TCA_GATE_ENTRY_MAX
+ 1] = {
157 [TCA_GATE_ENTRY_INDEX
] = { .type
= NLA_U32
},
158 [TCA_GATE_ENTRY_GATE
] = { .type
= NLA_FLAG
},
159 [TCA_GATE_ENTRY_INTERVAL
] = { .type
= NLA_U32
},
160 [TCA_GATE_ENTRY_IPV
] = { .type
= NLA_S32
},
161 [TCA_GATE_ENTRY_MAX_OCTETS
] = { .type
= NLA_S32
},
164 static const struct nla_policy gate_policy
[TCA_GATE_MAX
+ 1] = {
166 NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate
)),
167 [TCA_GATE_PRIORITY
] = { .type
= NLA_S32
},
168 [TCA_GATE_ENTRY_LIST
] = { .type
= NLA_NESTED
},
169 [TCA_GATE_BASE_TIME
] = { .type
= NLA_U64
},
170 [TCA_GATE_CYCLE_TIME
] = { .type
= NLA_U64
},
171 [TCA_GATE_CYCLE_TIME_EXT
] = { .type
= NLA_U64
},
172 [TCA_GATE_FLAGS
] = { .type
= NLA_U32
},
173 [TCA_GATE_CLOCKID
] = { .type
= NLA_S32
},
176 static int fill_gate_entry(struct nlattr
**tb
, struct tcfg_gate_entry
*entry
,
177 struct netlink_ext_ack
*extack
)
181 entry
->gate_state
= nla_get_flag(tb
[TCA_GATE_ENTRY_GATE
]);
183 if (tb
[TCA_GATE_ENTRY_INTERVAL
])
184 interval
= nla_get_u32(tb
[TCA_GATE_ENTRY_INTERVAL
]);
187 NL_SET_ERR_MSG(extack
, "Invalid interval for schedule entry");
191 entry
->interval
= interval
;
193 if (tb
[TCA_GATE_ENTRY_IPV
])
194 entry
->ipv
= nla_get_s32(tb
[TCA_GATE_ENTRY_IPV
]);
198 if (tb
[TCA_GATE_ENTRY_MAX_OCTETS
])
199 entry
->maxoctets
= nla_get_s32(tb
[TCA_GATE_ENTRY_MAX_OCTETS
]);
201 entry
->maxoctets
= -1;
206 static int parse_gate_entry(struct nlattr
*n
, struct tcfg_gate_entry
*entry
,
207 int index
, struct netlink_ext_ack
*extack
)
209 struct nlattr
*tb
[TCA_GATE_ENTRY_MAX
+ 1] = { };
212 err
= nla_parse_nested(tb
, TCA_GATE_ENTRY_MAX
, n
, entry_policy
, extack
);
214 NL_SET_ERR_MSG(extack
, "Could not parse nested entry");
218 entry
->index
= index
;
220 return fill_gate_entry(tb
, entry
, extack
);
223 static void release_entry_list(struct list_head
*entries
)
225 struct tcfg_gate_entry
*entry
, *e
;
227 list_for_each_entry_safe(entry
, e
, entries
, list
) {
228 list_del(&entry
->list
);
233 static int parse_gate_list(struct nlattr
*list_attr
,
234 struct tcf_gate_params
*sched
,
235 struct netlink_ext_ack
*extack
)
237 struct tcfg_gate_entry
*entry
;
245 nla_for_each_nested(n
, list_attr
, rem
) {
246 if (nla_type(n
) != TCA_GATE_ONE_ENTRY
) {
247 NL_SET_ERR_MSG(extack
, "Attribute isn't type 'entry'");
251 entry
= kzalloc(sizeof(*entry
), GFP_ATOMIC
);
253 NL_SET_ERR_MSG(extack
, "Not enough memory for entry");
258 err
= parse_gate_entry(n
, entry
, i
, extack
);
264 list_add_tail(&entry
->list
, &sched
->entries
);
268 sched
->num_entries
= i
;
273 release_entry_list(&sched
->entries
);
278 static void gate_setup_timer(struct tcf_gate
*gact
, u64 basetime
,
279 enum tk_offsets tko
, s32 clockid
,
283 if (basetime
== gact
->param
.tcfg_basetime
&&
284 tko
== gact
->tk_offset
&&
285 clockid
== gact
->param
.tcfg_clockid
)
288 spin_unlock_bh(&gact
->tcf_lock
);
289 hrtimer_cancel(&gact
->hitimer
);
290 spin_lock_bh(&gact
->tcf_lock
);
292 gact
->param
.tcfg_basetime
= basetime
;
293 gact
->param
.tcfg_clockid
= clockid
;
294 gact
->tk_offset
= tko
;
295 hrtimer_init(&gact
->hitimer
, clockid
, HRTIMER_MODE_ABS_SOFT
);
296 gact
->hitimer
.function
= gate_timer_func
;
299 static int tcf_gate_init(struct net
*net
, struct nlattr
*nla
,
300 struct nlattr
*est
, struct tc_action
**a
,
301 struct tcf_proto
*tp
, u32 flags
,
302 struct netlink_ext_ack
*extack
)
304 struct tc_action_net
*tn
= net_generic(net
, act_gate_ops
.net_id
);
305 enum tk_offsets tk_offset
= TK_OFFS_TAI
;
306 bool bind
= flags
& TCA_ACT_FLAGS_BIND
;
307 struct nlattr
*tb
[TCA_GATE_MAX
+ 1];
308 struct tcf_chain
*goto_ch
= NULL
;
309 u64 cycletime
= 0, basetime
= 0;
310 struct tcf_gate_params
*p
;
311 s32 clockid
= CLOCK_TAI
;
312 struct tcf_gate
*gact
;
313 struct tc_gate
*parm
;
323 err
= nla_parse_nested(tb
, TCA_GATE_MAX
, nla
, gate_policy
, extack
);
327 if (!tb
[TCA_GATE_PARMS
])
330 if (tb
[TCA_GATE_CLOCKID
]) {
331 clockid
= nla_get_s32(tb
[TCA_GATE_CLOCKID
]);
334 tk_offset
= TK_OFFS_REAL
;
336 case CLOCK_MONOTONIC
:
337 tk_offset
= TK_OFFS_MAX
;
340 tk_offset
= TK_OFFS_BOOT
;
343 tk_offset
= TK_OFFS_TAI
;
346 NL_SET_ERR_MSG(extack
, "Invalid 'clockid'");
351 parm
= nla_data(tb
[TCA_GATE_PARMS
]);
354 err
= tcf_idr_check_alloc(tn
, &index
, a
, bind
);
362 ret
= tcf_idr_create_from_flags(tn
, index
, est
, a
,
363 &act_gate_ops
, bind
, flags
);
365 tcf_idr_cleanup(tn
, index
);
370 } else if (!(flags
& TCA_ACT_FLAGS_REPLACE
)) {
371 tcf_idr_release(*a
, bind
);
375 if (tb
[TCA_GATE_PRIORITY
])
376 prio
= nla_get_s32(tb
[TCA_GATE_PRIORITY
]);
378 if (tb
[TCA_GATE_BASE_TIME
])
379 basetime
= nla_get_u64(tb
[TCA_GATE_BASE_TIME
]);
381 if (tb
[TCA_GATE_FLAGS
])
382 gflags
= nla_get_u32(tb
[TCA_GATE_FLAGS
]);
385 if (ret
== ACT_P_CREATED
)
386 INIT_LIST_HEAD(&gact
->param
.entries
);
388 err
= tcf_action_check_ctrlact(parm
->action
, tp
, &goto_ch
, extack
);
392 spin_lock_bh(&gact
->tcf_lock
);
395 if (tb
[TCA_GATE_CYCLE_TIME
])
396 cycletime
= nla_get_u64(tb
[TCA_GATE_CYCLE_TIME
]);
398 if (tb
[TCA_GATE_ENTRY_LIST
]) {
399 err
= parse_gate_list(tb
[TCA_GATE_ENTRY_LIST
], p
, extack
);
405 struct tcfg_gate_entry
*entry
;
408 list_for_each_entry(entry
, &p
->entries
, list
)
409 cycle
= ktime_add_ns(cycle
, entry
->interval
);
416 p
->tcfg_cycletime
= cycletime
;
418 if (tb
[TCA_GATE_CYCLE_TIME_EXT
])
419 p
->tcfg_cycletime_ext
=
420 nla_get_u64(tb
[TCA_GATE_CYCLE_TIME_EXT
]);
422 gate_setup_timer(gact
, basetime
, tk_offset
, clockid
,
423 ret
== ACT_P_CREATED
);
424 p
->tcfg_priority
= prio
;
425 p
->tcfg_flags
= gflags
;
426 gate_get_start_time(gact
, &start
);
428 gact
->current_close_time
= start
;
429 gact
->current_gate_status
= GATE_ACT_GATE_OPEN
| GATE_ACT_PENDING
;
431 gact
->next_entry
= list_first_entry(&p
->entries
,
432 struct tcfg_gate_entry
, list
);
434 goto_ch
= tcf_action_set_ctrlact(*a
, parm
->action
, goto_ch
);
436 gate_start_timer(gact
, start
);
438 spin_unlock_bh(&gact
->tcf_lock
);
441 tcf_chain_put_by_act(goto_ch
);
446 spin_unlock_bh(&gact
->tcf_lock
);
449 tcf_chain_put_by_act(goto_ch
);
451 /* action is not inserted in any list: it's safe to init hitimer
452 * without taking tcf_lock.
454 if (ret
== ACT_P_CREATED
)
455 gate_setup_timer(gact
, gact
->param
.tcfg_basetime
,
456 gact
->tk_offset
, gact
->param
.tcfg_clockid
,
458 tcf_idr_release(*a
, bind
);
462 static void tcf_gate_cleanup(struct tc_action
*a
)
464 struct tcf_gate
*gact
= to_gate(a
);
465 struct tcf_gate_params
*p
;
468 hrtimer_cancel(&gact
->hitimer
);
469 release_entry_list(&p
->entries
);
472 static int dumping_entry(struct sk_buff
*skb
,
473 struct tcfg_gate_entry
*entry
)
477 item
= nla_nest_start_noflag(skb
, TCA_GATE_ONE_ENTRY
);
481 if (nla_put_u32(skb
, TCA_GATE_ENTRY_INDEX
, entry
->index
))
482 goto nla_put_failure
;
484 if (entry
->gate_state
&& nla_put_flag(skb
, TCA_GATE_ENTRY_GATE
))
485 goto nla_put_failure
;
487 if (nla_put_u32(skb
, TCA_GATE_ENTRY_INTERVAL
, entry
->interval
))
488 goto nla_put_failure
;
490 if (nla_put_s32(skb
, TCA_GATE_ENTRY_MAX_OCTETS
, entry
->maxoctets
))
491 goto nla_put_failure
;
493 if (nla_put_s32(skb
, TCA_GATE_ENTRY_IPV
, entry
->ipv
))
494 goto nla_put_failure
;
496 return nla_nest_end(skb
, item
);
499 nla_nest_cancel(skb
, item
);
503 static int tcf_gate_dump(struct sk_buff
*skb
, struct tc_action
*a
,
506 unsigned char *b
= skb_tail_pointer(skb
);
507 struct tcf_gate
*gact
= to_gate(a
);
508 struct tc_gate opt
= {
509 .index
= gact
->tcf_index
,
510 .refcnt
= refcount_read(&gact
->tcf_refcnt
) - ref
,
511 .bindcnt
= atomic_read(&gact
->tcf_bindcnt
) - bind
,
513 struct tcfg_gate_entry
*entry
;
514 struct tcf_gate_params
*p
;
515 struct nlattr
*entry_list
;
518 spin_lock_bh(&gact
->tcf_lock
);
519 opt
.action
= gact
->tcf_action
;
523 if (nla_put(skb
, TCA_GATE_PARMS
, sizeof(opt
), &opt
))
524 goto nla_put_failure
;
526 if (nla_put_u64_64bit(skb
, TCA_GATE_BASE_TIME
,
527 p
->tcfg_basetime
, TCA_GATE_PAD
))
528 goto nla_put_failure
;
530 if (nla_put_u64_64bit(skb
, TCA_GATE_CYCLE_TIME
,
531 p
->tcfg_cycletime
, TCA_GATE_PAD
))
532 goto nla_put_failure
;
534 if (nla_put_u64_64bit(skb
, TCA_GATE_CYCLE_TIME_EXT
,
535 p
->tcfg_cycletime_ext
, TCA_GATE_PAD
))
536 goto nla_put_failure
;
538 if (nla_put_s32(skb
, TCA_GATE_CLOCKID
, p
->tcfg_clockid
))
539 goto nla_put_failure
;
541 if (nla_put_u32(skb
, TCA_GATE_FLAGS
, p
->tcfg_flags
))
542 goto nla_put_failure
;
544 if (nla_put_s32(skb
, TCA_GATE_PRIORITY
, p
->tcfg_priority
))
545 goto nla_put_failure
;
547 entry_list
= nla_nest_start_noflag(skb
, TCA_GATE_ENTRY_LIST
);
549 goto nla_put_failure
;
551 list_for_each_entry(entry
, &p
->entries
, list
) {
552 if (dumping_entry(skb
, entry
) < 0)
553 goto nla_put_failure
;
556 nla_nest_end(skb
, entry_list
);
558 tcf_tm_dump(&t
, &gact
->tcf_tm
);
559 if (nla_put_64bit(skb
, TCA_GATE_TM
, sizeof(t
), &t
, TCA_GATE_PAD
))
560 goto nla_put_failure
;
561 spin_unlock_bh(&gact
->tcf_lock
);
566 spin_unlock_bh(&gact
->tcf_lock
);
571 static void tcf_gate_stats_update(struct tc_action
*a
, u64 bytes
, u64 packets
,
572 u64 drops
, u64 lastuse
, bool hw
)
574 struct tcf_gate
*gact
= to_gate(a
);
575 struct tcf_t
*tm
= &gact
->tcf_tm
;
577 tcf_action_update_stats(a
, bytes
, packets
, drops
, hw
);
578 tm
->lastuse
= max_t(u64
, tm
->lastuse
, lastuse
);
581 static size_t tcf_gate_get_fill_size(const struct tc_action
*act
)
583 return nla_total_size(sizeof(struct tc_gate
));
/* flow_action destructor: free the entry array handed out by
 * tcf_gate_get_entries().
 */
static void tcf_gate_entry_destructor(void *priv)
{
	struct action_gate_entry *oe = priv;

	kfree(oe);
}
593 static int tcf_gate_get_entries(struct flow_action_entry
*entry
,
594 const struct tc_action
*act
)
596 entry
->gate
.entries
= tcf_gate_get_list(act
);
598 if (!entry
->gate
.entries
)
601 entry
->destructor
= tcf_gate_entry_destructor
;
602 entry
->destructor_priv
= entry
->gate
.entries
;
607 static int tcf_gate_offload_act_setup(struct tc_action
*act
, void *entry_data
,
608 u32
*index_inc
, bool bind
,
609 struct netlink_ext_ack
*extack
)
614 struct flow_action_entry
*entry
= entry_data
;
616 entry
->id
= FLOW_ACTION_GATE
;
617 entry
->gate
.prio
= tcf_gate_prio(act
);
618 entry
->gate
.basetime
= tcf_gate_basetime(act
);
619 entry
->gate
.cycletime
= tcf_gate_cycletime(act
);
620 entry
->gate
.cycletimeext
= tcf_gate_cycletimeext(act
);
621 entry
->gate
.num_entries
= tcf_gate_num_entries(act
);
622 err
= tcf_gate_get_entries(entry
, act
);
627 struct flow_offload_action
*fl_action
= entry_data
;
629 fl_action
->id
= FLOW_ACTION_GATE
;
635 static struct tc_action_ops act_gate_ops
= {
638 .owner
= THIS_MODULE
,
640 .dump
= tcf_gate_dump
,
641 .init
= tcf_gate_init
,
642 .cleanup
= tcf_gate_cleanup
,
643 .stats_update
= tcf_gate_stats_update
,
644 .get_fill_size
= tcf_gate_get_fill_size
,
645 .offload_act_setup
= tcf_gate_offload_act_setup
,
646 .size
= sizeof(struct tcf_gate
),
648 MODULE_ALIAS_NET_ACT("gate");
650 static __net_init
int gate_init_net(struct net
*net
)
652 struct tc_action_net
*tn
= net_generic(net
, act_gate_ops
.net_id
);
654 return tc_action_net_init(net
, tn
, &act_gate_ops
);
657 static void __net_exit
gate_exit_net(struct list_head
*net_list
)
659 tc_action_net_exit(net_list
, act_gate_ops
.net_id
);
662 static struct pernet_operations gate_net_ops
= {
663 .init
= gate_init_net
,
664 .exit_batch
= gate_exit_net
,
665 .id
= &act_gate_ops
.net_id
,
666 .size
= sizeof(struct tc_action_net
),
669 static int __init
gate_init_module(void)
671 return tcf_register_action(&act_gate_ops
, &gate_net_ops
);
674 static void __exit
gate_cleanup_module(void)
676 tcf_unregister_action(&act_gate_ops
, &gate_net_ops
);
679 module_init(gate_init_module
);
680 module_exit(gate_cleanup_module
);
681 MODULE_DESCRIPTION("TC gate action");
682 MODULE_LICENSE("GPL v2");