// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright 2020 NXP */

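/* The "gate" action implements IEEE 802.1Qci-style (PSFP) stream gate
 * control: packets are passed or dropped according to a repeating time
 * schedule of open/close entries, each optionally carrying an internal
 * priority value (ipv) and a per-interval octet budget (maxoctets).
 */
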
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gate.h>
#include <net/tc_wrapper.h>

static struct tc_action_ops act_gate_ops;

static ktime_t gate_get_time(struct tcf_gate *gact)
{
	ktime_t mono = ktime_get();

	switch (gact->tk_offset) {
	case TK_OFFS_MAX:
		return mono;
	default:
		return ktime_mono_to_any(mono, gact->tk_offset);
	}

	return KTIME_MAX;
}

static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
	struct tcf_gate_params *param = &gact->param;
	ktime_t now, base, cycle;
	u64 n;

	base = ns_to_ktime(param->tcfg_basetime);
	now = gate_get_time(gact);

	if (ktime_after(base, now)) {
		*start = base;
		return;
	}

	cycle = param->tcfg_cycletime;

	n = div64_u64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
}

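/* Worked example (illustrative values only): with tcfg_basetime = 0,
 * tcfg_cycletime = 1000000 ns and now = 2500000 ns, n = 2 and the
 * schedule starts at the next full cycle boundary, 3000000 ns. A base
 * time that is still in the future is used as-is.
 */
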
static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
{
	ktime_t expires;

	expires = hrtimer_get_expires(&gact->hitimer);
	if (expires == 0)
		expires = KTIME_MAX;

	start = min_t(ktime_t, start, expires);

	hrtimer_start(&gact->hitimer, start, HRTIMER_MODE_ABS_SOFT);
}

static enum hrtimer_restart gate_timer_func(struct hrtimer *timer)
{
	struct tcf_gate *gact = container_of(timer, struct tcf_gate,
					     hitimer);
	struct tcf_gate_params *p = &gact->param;
	struct tcfg_gate_entry *next;
	ktime_t close_time, now;

	spin_lock(&gact->tcf_lock);

	next = gact->next_entry;

	/* cycle start, clear pending bit, clear total octets */
	gact->current_gate_status = next->gate_state ? GATE_ACT_GATE_OPEN : 0;
	gact->current_entry_octets = 0;
	gact->current_max_octets = next->maxoctets;

	gact->current_close_time = ktime_add_ns(gact->current_close_time,
						next->interval);

	close_time = gact->current_close_time;

	if (list_is_last(&next->list, &p->entries))
		next = list_first_entry(&p->entries,
					struct tcfg_gate_entry, list);
	else
		next = list_next_entry(next, list);

	now = gate_get_time(gact);

	if (ktime_after(now, close_time)) {
		ktime_t cycle, base;
		u64 n;

		cycle = p->tcfg_cycletime;
		base = ns_to_ktime(p->tcfg_basetime);
		n = div64_u64(ktime_sub_ns(now, base), cycle);
		close_time = ktime_add_ns(base, (n + 1) * cycle);
	}

	gact->next_entry = next;

	hrtimer_set_expires(&gact->hitimer, close_time);

	spin_unlock(&gact->tcf_lock);

	return HRTIMER_RESTART;
}

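/* Illustration (hypothetical schedule): with two entries, "open" for
 * 200000 ns then "close" for 100000 ns, the timer fires at each entry
 * boundary, applies the entry just reached, and wraps from the last
 * entry back to the first, giving a repeating 300000 ns cycle.
 */
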
TC_INDIRECT_SCOPE int tcf_gate_act(struct sk_buff *skb,
				   const struct tc_action *a,
				   struct tcf_result *res)
{
	struct tcf_gate *gact = to_gate(a);
	int action = READ_ONCE(gact->tcf_action);

	tcf_lastuse_update(&gact->tcf_tm);
	tcf_action_update_bstats(&gact->common, skb);

	spin_lock(&gact->tcf_lock);
	if (unlikely(gact->current_gate_status & GATE_ACT_PENDING)) {
		spin_unlock(&gact->tcf_lock);
		return action;
	}

	if (!(gact->current_gate_status & GATE_ACT_GATE_OPEN)) {
		spin_unlock(&gact->tcf_lock);
		goto drop;
	}

	if (gact->current_max_octets >= 0) {
		gact->current_entry_octets += qdisc_pkt_len(skb);
		if (gact->current_entry_octets > gact->current_max_octets) {
			spin_unlock(&gact->tcf_lock);
			goto overlimit;
		}
	}
	spin_unlock(&gact->tcf_lock);

	return action;

overlimit:
	tcf_action_inc_overlimit_qstats(&gact->common);
drop:
	tcf_action_inc_drop_qstats(&gact->common);
	return TC_ACT_SHOT;
}

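/* Datapath summary: before the first cycle starts (GATE_ACT_PENDING)
 * packets pass through unmodified; a closed gate drops; an open gate
 * with maxoctets >= 0 admits packets until the per-interval octet
 * budget is exceeded, after which the excess is dropped as overlimit.
 */
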
static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
	[TCA_GATE_ENTRY_INDEX] = { .type = NLA_U32 },
	[TCA_GATE_ENTRY_GATE] = { .type = NLA_FLAG },
	[TCA_GATE_ENTRY_INTERVAL] = { .type = NLA_U32 },
	[TCA_GATE_ENTRY_IPV] = { .type = NLA_S32 },
	[TCA_GATE_ENTRY_MAX_OCTETS] = { .type = NLA_S32 },
};

static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
	[TCA_GATE_PARMS] =
		NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)),
	[TCA_GATE_PRIORITY] = { .type = NLA_S32 },
	[TCA_GATE_ENTRY_LIST] = { .type = NLA_NESTED },
	[TCA_GATE_BASE_TIME] = { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME] = { .type = NLA_U64 },
	[TCA_GATE_CYCLE_TIME_EXT] = { .type = NLA_U64 },
	[TCA_GATE_FLAGS] = { .type = NLA_U32 },
	[TCA_GATE_CLOCKID] = { .type = NLA_S32 },
};

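/* Attribute semantics as parsed below: TCA_GATE_ENTRY_IPV and
 * TCA_GATE_ENTRY_MAX_OCTETS are optional and default to -1, meaning
 * "no internal priority" and "no octet limit" respectively, while
 * TCA_GATE_ENTRY_INTERVAL is mandatory and must be non-zero.
 */
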
static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
			   struct netlink_ext_ack *extack)
{
	u32 interval = 0;

	entry->gate_state = nla_get_flag(tb[TCA_GATE_ENTRY_GATE]);

	if (tb[TCA_GATE_ENTRY_INTERVAL])
		interval = nla_get_u32(tb[TCA_GATE_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;

	entry->ipv = nla_get_s32_default(tb[TCA_GATE_ENTRY_IPV], -1);

	entry->maxoctets = nla_get_s32_default(tb[TCA_GATE_ENTRY_MAX_OCTETS],
					       -1);

	return 0;
}

static int parse_gate_entry(struct nlattr *n, struct tcfg_gate_entry *entry,
			    int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GATE_ENTRY_MAX + 1] = { };
	int err;

	err = nla_parse_nested(tb, TCA_GATE_ENTRY_MAX, n, entry_policy, extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_gate_entry(tb, entry, extack);
}

static void release_entry_list(struct list_head *entries)
{
	struct tcfg_gate_entry *entry, *e;

	list_for_each_entry_safe(entry, e, entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}

static int parse_gate_list(struct nlattr *list_attr,
			   struct tcf_gate_params *sched,
			   struct netlink_ext_ack *extack)
{
	struct tcfg_gate_entry *entry;
	struct nlattr *n;
	int err, rem;
	int i = 0;

	if (!list_attr)
		return -EINVAL;

	nla_for_each_nested(n, list_attr, rem) {
		if (nla_type(n) != TCA_GATE_ONE_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute isn't type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			err = -ENOMEM;
			goto release_list;
		}

		err = parse_gate_entry(n, entry, i, extack);
		if (err < 0) {
			kfree(entry);
			goto release_list;
		}

		list_add_tail(&entry->list, &sched->entries);
		i++;
	}

	sched->num_entries = i;

	return i;

release_list:
	release_entry_list(&sched->entries);

	return err;
}

static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
			     enum tk_offsets tko, s32 clockid,
			     bool do_init)
{
	if (!do_init) {
		if (basetime == gact->param.tcfg_basetime &&
		    tko == gact->tk_offset &&
		    clockid == gact->param.tcfg_clockid)
			return;

		/* The timer callback takes tcf_lock, so drop it while
		 * waiting for a possibly running callback to finish.
		 */
		spin_unlock_bh(&gact->tcf_lock);
		hrtimer_cancel(&gact->hitimer);
		spin_lock_bh(&gact->tcf_lock);
	}
	gact->param.tcfg_basetime = basetime;
	gact->param.tcfg_clockid = clockid;
	gact->tk_offset = tko;
	hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
	gact->hitimer.function = gate_timer_func;
}

static int tcf_gate_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp, u32 flags,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);
	enum tk_offsets tk_offset = TK_OFFS_TAI;
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_GATE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	u64 cycletime = 0, basetime = 0;
	struct tcf_gate_params *p;
	s32 clockid = CLOCK_TAI;
	struct tcf_gate *gact;
	struct tc_gate *parm;
	int ret = 0, err;
	u32 gflags = 0;
	s32 prio = -1;
	ktime_t start;
	u32 index;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GATE_MAX, nla, gate_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GATE_PARMS])
		return -EINVAL;

	if (tb[TCA_GATE_CLOCKID]) {
		clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
		switch (clockid) {
		case CLOCK_REALTIME:
			tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
			return -EINVAL;
		}
	}

	parm = nla_data(tb[TCA_GATE_PARMS]);
	index = parm->index;

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (err && bind)
		return ACT_P_BOUND;

	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_gate_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	if (tb[TCA_GATE_PRIORITY])
		prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);

	if (tb[TCA_GATE_BASE_TIME])
		basetime = nla_get_u64(tb[TCA_GATE_BASE_TIME]);

	if (tb[TCA_GATE_FLAGS])
		gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);

	gact = to_gate(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&gact->param.entries);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&gact->tcf_lock);
	p = &gact->param;

	if (tb[TCA_GATE_CYCLE_TIME])
		cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);

	if (tb[TCA_GATE_ENTRY_LIST]) {
		err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
		if (err < 0)
			goto chain_put;
	}

	if (!cycletime) {
		struct tcfg_gate_entry *entry;
		ktime_t cycle = 0;

		/* When no cycle time is given, derive it from the sum
		 * of all entry intervals.
		 */
		list_for_each_entry(entry, &p->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		cycletime = cycle;
		if (!cycletime) {
			err = -EINVAL;
			goto chain_put;
		}
	}
	p->tcfg_cycletime = cycletime;

	if (tb[TCA_GATE_CYCLE_TIME_EXT])
		p->tcfg_cycletime_ext =
			nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);

	gate_setup_timer(gact, basetime, tk_offset, clockid,
			 ret == ACT_P_CREATED);
	p->tcfg_priority = prio;
	p->tcfg_flags = gflags;
	gate_get_start_time(gact, &start);

	gact->current_close_time = start;
	gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;

	gact->next_entry = list_first_entry(&p->entries,
					    struct tcfg_gate_entry, list);

	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);

	gate_start_timer(gact, start);

	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;

chain_put:
	spin_unlock_bh(&gact->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	/* action is not inserted in any list: it's safe to init hitimer
	 * without taking tcf_lock.
	 */
	if (ret == ACT_P_CREATED)
		gate_setup_timer(gact, gact->param.tcfg_basetime,
				 gact->tk_offset, gact->param.tcfg_clockid,
				 true);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_gate_cleanup(struct tc_action *a)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_gate_params *p;

	p = &gact->param;
	hrtimer_cancel(&gact->hitimer);
	release_entry_list(&p->entries);
}

static int dumping_entry(struct sk_buff *skb,
			 struct tcfg_gate_entry *entry)
{
	struct nlattr *item;

	item = nla_nest_start_noflag(skb, TCA_GATE_ONE_ENTRY);
	if (!item)
		return -ENOSPC;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INDEX, entry->index))
		goto nla_put_failure;

	if (entry->gate_state && nla_put_flag(skb, TCA_GATE_ENTRY_GATE))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_ENTRY_INTERVAL, entry->interval))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_MAX_OCTETS, entry->maxoctets))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_ENTRY_IPV, entry->ipv))
		goto nla_put_failure;

	return nla_nest_end(skb, item);

nla_put_failure:
	nla_nest_cancel(skb, item);
	return -1;
}

static int tcf_gate_dump(struct sk_buff *skb, struct tc_action *a,
			 int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_gate *gact = to_gate(a);
	struct tc_gate opt = {
		.index = gact->tcf_index,
		.refcnt = refcount_read(&gact->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&gact->tcf_bindcnt) - bind,
	};
	struct tcfg_gate_entry *entry;
	struct tcf_gate_params *p;
	struct nlattr *entry_list;
	struct tcf_t t;

	spin_lock_bh(&gact->tcf_lock);
	opt.action = gact->tcf_action;

	p = &gact->param;

	if (nla_put(skb, TCA_GATE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_BASE_TIME,
			      p->tcfg_basetime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME,
			      p->tcfg_cycletime, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_u64_64bit(skb, TCA_GATE_CYCLE_TIME_EXT,
			      p->tcfg_cycletime_ext, TCA_GATE_PAD))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_CLOCKID, p->tcfg_clockid))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GATE_FLAGS, p->tcfg_flags))
		goto nla_put_failure;

	if (nla_put_s32(skb, TCA_GATE_PRIORITY, p->tcfg_priority))
		goto nla_put_failure;

	entry_list = nla_nest_start_noflag(skb, TCA_GATE_ENTRY_LIST);
	if (!entry_list)
		goto nla_put_failure;

	list_for_each_entry(entry, &p->entries, list) {
		if (dumping_entry(skb, entry) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, entry_list);

	tcf_tm_dump(&t, &gact->tcf_tm);
	if (nla_put_64bit(skb, TCA_GATE_TM, sizeof(t), &t, TCA_GATE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&gact->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&gact->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
				  u64 drops, u64 lastuse, bool hw)
{
	struct tcf_gate *gact = to_gate(a);
	struct tcf_t *tm = &gact->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static size_t tcf_gate_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_gate));
}

static void tcf_gate_entry_destructor(void *priv)
{
	struct action_gate_entry *oe = priv;

	kfree(oe);
}

static int tcf_gate_get_entries(struct flow_action_entry *entry,
				const struct tc_action *act)
{
	entry->gate.entries = tcf_gate_get_list(act);

	if (!entry->gate.entries)
		return -EINVAL;

	entry->destructor = tcf_gate_entry_destructor;
	entry->destructor_priv = entry->gate.entries;

	return 0;
}

static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	int err;

	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_GATE;
		entry->gate.prio = tcf_gate_prio(act);
		entry->gate.basetime = tcf_gate_basetime(act);
		entry->gate.cycletime = tcf_gate_cycletime(act);
		entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
		entry->gate.num_entries = tcf_gate_num_entries(act);
		err = tcf_gate_get_entries(entry, act);
		if (err)
			return err;

		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_GATE;
	}

	return 0;
}

static struct tc_action_ops act_gate_ops = {
	.kind = "gate",
	.id = TCA_ID_GATE,
	.owner = THIS_MODULE,
	.act = tcf_gate_act,
	.dump = tcf_gate_dump,
	.init = tcf_gate_init,
	.cleanup = tcf_gate_cleanup,
	.stats_update = tcf_gate_stats_update,
	.get_fill_size = tcf_gate_get_fill_size,
	.offload_act_setup = tcf_gate_offload_act_setup,
	.size = sizeof(struct tcf_gate),
};
MODULE_ALIAS_NET_ACT("gate");

static __net_init int gate_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_gate_ops.net_id);

	return tc_action_net_init(net, tn, &act_gate_ops);
}

static void __net_exit gate_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_gate_ops.net_id);
}

static struct pernet_operations gate_net_ops = {
	.init = gate_init_net,
	.exit_batch = gate_exit_net,
	.id = &act_gate_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init gate_init_module(void)
{
	return tcf_register_action(&act_gate_ops, &gate_net_ops);
}

static void __exit gate_cleanup_module(void)
{
	tcf_unregister_action(&act_gate_ops, &gate_net_ops);
}

module_init(gate_init_module);
module_exit(gate_cleanup_module);
MODULE_DESCRIPTION("TC gate action");
MODULE_LICENSE("GPL v2");
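
/*
 * Example usage (illustrative; option names follow the tc-gate(8) man
 * page and may differ by iproute2 version):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower dst_ip 192.168.0.2 \
 *      action gate index 2 clockid CLOCK_TAI base-time 200000000000 \
 *      sched-entry open  200000000 -1 8000000 \
 *      sched-entry close 100000000 -1 -1
 *
 * Each sched-entry is "<open|close> <interval ns> <ipv> <maxoctets>",
 * with -1 leaving the internal priority or octet budget unset.
 */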