// SPDX-License-Identifier: GPL-2.0

/* net/sched/sch_taprio.c	 Time Aware Priority Scheduler
 *
 * Authors:	Vinicius Costa Gomes <vinicius.gomes@intel.com>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>
static LIST_HEAD(taprio_list);
static DEFINE_SPINLOCK(taprio_list_lock);

#define TAPRIO_ALL_GATES_OPEN -1

#define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
#define TAPRIO_FLAGS_INVALID U32_MAX
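/* Note (assumption based on the UAPI flag definitions, not on anything in
 * this file): TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST is bit 0 and
 * TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD is bit 1, so user space typically
 * requests these modes as "flags 0x1" (txtime assist) or "flags 0x2"
 * (full offload). The two modes are mutually exclusive, as
 * taprio_flags_valid() below enforces.
 */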
struct sched_entry {
	struct list_head list;

	/* The instant that this entry "closes" and the next one
	 * should open, the qdisc will make some effort so that no
	 * packet leaves after this time.
	 */
struct sched_gate_list {
	struct list_head entries;
	ktime_t cycle_close_time;
	s64 cycle_time_extension;
struct taprio_sched {
	struct Qdisc **qdiscs;
	enum tk_offsets tk_offset;
	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
				    * speeds it's sub-nanoseconds per byte
				    */

	/* Protects the update side of the RCU protected current_entry */
	spinlock_t current_entry_lock;
	struct sched_entry __rcu *current_entry;
	struct sched_gate_list __rcu *oper_sched;
	struct sched_gate_list __rcu *admin_sched;
	struct hrtimer advance_timer;
	struct list_head taprio_list;
	struct sk_buff *(*dequeue)(struct Qdisc *sch);
	struct sk_buff *(*peek)(struct Qdisc *sch);
struct __tc_taprio_qopt_offload {
	struct tc_taprio_qopt_offload offload;
static ktime_t sched_base_time(const struct sched_gate_list *sched)
{
	return ns_to_ktime(sched->base_time);
}
static ktime_t taprio_get_time(struct taprio_sched *q)
{
	ktime_t mono = ktime_get();

	switch (q->tk_offset) {
		return ktime_mono_to_any(mono, q->tk_offset);
static void taprio_free_sched_cb(struct rcu_head *head)
{
	struct sched_gate_list *sched = container_of(head, struct sched_gate_list, rcu);
	struct sched_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &sched->entries, list) {
		list_del(&entry->list);
static void switch_schedules(struct taprio_sched *q,
			     struct sched_gate_list **admin,
			     struct sched_gate_list **oper)
{
	rcu_assign_pointer(q->oper_sched, *admin);
	rcu_assign_pointer(q->admin_sched, NULL);

	call_rcu(&(*oper)->rcu, taprio_free_sched_cb);
/* Get how much time has already elapsed in the current cycle. */
static s32 get_cycle_time_elapsed(struct sched_gate_list *sched, ktime_t time)
{
	ktime_t time_since_sched_start;

	time_since_sched_start = ktime_sub(time, sched->base_time);
	div_s64_rem(time_since_sched_start, sched->cycle_time, &time_elapsed);

	return time_elapsed;
}
static ktime_t get_interval_end_time(struct sched_gate_list *sched,
				     struct sched_gate_list *admin,
				     struct sched_entry *entry,
				     ktime_t intv_start)
{
	s32 cycle_elapsed = get_cycle_time_elapsed(sched, intv_start);
	ktime_t intv_end, cycle_ext_end, cycle_end;

	cycle_end = ktime_add_ns(intv_start, sched->cycle_time - cycle_elapsed);
	intv_end = ktime_add_ns(intv_start, entry->interval);
	cycle_ext_end = ktime_add(cycle_end, sched->cycle_time_extension);

	if (ktime_before(intv_end, cycle_end))
		return intv_end;
	else if (admin && admin != sched &&
		 ktime_after(admin->base_time, cycle_end) &&
		 ktime_before(admin->base_time, cycle_ext_end))
		return admin->base_time;
static int length_to_duration(struct taprio_sched *q, int len)
{
	return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
}
/* Returns the entry corresponding to next available interval. If
 * validate_interval is set, it only validates whether the timestamp occurs
 * when the gate corresponding to the skb's traffic class is open.
 */
static struct sched_entry *find_entry_to_transmit(struct sk_buff *skb,
						  struct Qdisc *sch,
						  struct sched_gate_list *sched,
						  struct sched_gate_list *admin,
						  ktime_t time,
						  ktime_t *interval_start,
						  ktime_t *interval_end,
						  bool validate_interval)
{
	ktime_t curr_intv_start, curr_intv_end, cycle_end, packet_transmit_time;
	ktime_t earliest_txtime = KTIME_MAX, txtime, cycle, transmit_end_time;
	struct sched_entry *entry = NULL, *entry_found = NULL;
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	bool entry_available = false;

	tc = netdev_get_prio_tc_map(dev, skb->priority);
	packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb));

	cycle = sched->cycle_time;
	cycle_elapsed = get_cycle_time_elapsed(sched, time);
	curr_intv_end = ktime_sub_ns(time, cycle_elapsed);
	cycle_end = ktime_add_ns(curr_intv_end, cycle);

	list_for_each_entry(entry, &sched->entries, list) {
		curr_intv_start = curr_intv_end;
		curr_intv_end = get_interval_end_time(sched, admin, entry,
						      curr_intv_start);

		if (ktime_after(curr_intv_start, cycle_end))
			break;

		if (!(entry->gate_mask & BIT(tc)) ||
		    packet_transmit_time > entry->interval)
			continue;

		txtime = entry->next_txtime;

		if (ktime_before(txtime, time) || validate_interval) {
			transmit_end_time = ktime_add_ns(time, packet_transmit_time);
			if ((ktime_before(curr_intv_start, time) &&
			     ktime_before(transmit_end_time, curr_intv_end)) ||
			    (ktime_after(curr_intv_start, time) && !validate_interval)) {
				*interval_start = curr_intv_start;
				*interval_end = curr_intv_end;
			} else if (!entry_available && !validate_interval) {
				/* Here, we are just trying to find out the
				 * first available interval in the next cycle.
				 */
				*interval_start = ktime_add_ns(curr_intv_start, cycle);
				*interval_end = ktime_add_ns(curr_intv_end, cycle);
			}
		} else if (ktime_before(txtime, earliest_txtime) &&
			   !entry_available) {
			earliest_txtime = txtime;
			n = div_s64(ktime_sub(txtime, curr_intv_start), cycle);
			*interval_start = ktime_add(curr_intv_start, n * cycle);
			*interval_end = ktime_add(curr_intv_end, n * cycle);
		}
	}
static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t interval_start, interval_end;
	struct sched_entry *entry;

	sched = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	entry = find_entry_to_transmit(skb, sch, sched, admin, skb->tstamp,
				       &interval_start, &interval_end, true);
static bool taprio_flags_valid(u32 flags)
{
	/* Make sure no other flag bits are set. */
	if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST |
		      TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;

	/* txtime-assist and full offload are mutually exclusive */
	if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
	    (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD))
		return false;
/* This returns the tstamp value set by TCP in terms of the set clock. */
static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb)
{
	unsigned int offset = skb_network_offset(skb);
	const struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	struct ipv6hdr _ipv6h;

	ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);

	if (ipv6h->version == 4) {
		iph = (struct iphdr *)ipv6h;
		offset += iph->ihl * 4;

		/* special-case 6in4 tunnelling, as that is a common way to get
		 * v6 connectivity in the home
		 */
		if (iph->protocol == IPPROTO_IPV6) {
			ipv6h = skb_header_pointer(skb, offset,
						   sizeof(_ipv6h), &_ipv6h);

			if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
				return 0;
		} else if (iph->protocol != IPPROTO_TCP) {
			return 0;
		}
	} else if (ipv6h->version == 6 && ipv6h->nexthdr != IPPROTO_TCP) {
		return 0;
	}

	return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset);
/* There are a few scenarios where we will have to modify the txtime from
 * what is read from next_txtime in sched_entry. They are:
 * 1. If txtime is in the past,
 *    a. The gate for the traffic class is currently open and packet can be
 *       transmitted before it closes, schedule the packet right away.
 *    b. If the gate corresponding to the traffic class is going to open later
 *       in the cycle, set the txtime of packet to the interval start.
 * 2. If txtime is in the future, there are packets corresponding to the
 *    current traffic class waiting to be transmitted. So, the following
 *    possibilities exist:
 *    a. We can transmit the packet before the window containing the txtime
 *       closes.
 *    b. The window might close before the transmission can be completed
 *       successfully. So, schedule the packet in the next open window.
 */
static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
{
	ktime_t transmit_end_time, interval_end, interval_start, tcp_tstamp;
	struct taprio_sched *q = qdisc_priv(sch);
	struct sched_gate_list *sched, *admin;
	ktime_t minimum_time, now, txtime;
	int len, packet_transmit_time;
	struct sched_entry *entry;
	bool sched_changed;

	now = taprio_get_time(q);
	minimum_time = ktime_add_ns(now, q->txtime_delay);

	tcp_tstamp = get_tcp_tstamp(q, skb);
	minimum_time = max_t(ktime_t, minimum_time, tcp_tstamp);

	admin = rcu_dereference(q->admin_sched);
	sched = rcu_dereference(q->oper_sched);
	if (admin && ktime_after(minimum_time, admin->base_time))
		switch_schedules(q, &admin, &sched);

	/* Until the schedule starts, all the queues are open */
	if (!sched || ktime_before(minimum_time, sched->base_time)) {
		txtime = minimum_time;
	}

	len = qdisc_pkt_len(skb);
	packet_transmit_time = length_to_duration(q, len);

	do {
		entry = find_entry_to_transmit(skb, sch, sched, admin,
					       minimum_time,
					       &interval_start, &interval_end,
					       false);

		txtime = entry->next_txtime;
		txtime = max_t(ktime_t, txtime, minimum_time);
		txtime = max_t(ktime_t, txtime, interval_start);

		if (admin && admin != sched &&
		    ktime_after(txtime, admin->base_time)) {
			sched = admin;
			sched_changed = true;
			continue;
		}

		transmit_end_time = ktime_add(txtime, packet_transmit_time);
		minimum_time = transmit_end_time;

		/* Update the txtime of the current entry to the next time its
		 * interval starts.
		 */
		if (ktime_after(transmit_end_time, interval_end))
			entry->next_txtime = ktime_add(interval_start, sched->cycle_time);
	} while (sched_changed || ktime_after(transmit_end_time, interval_end));

	entry->next_txtime = transmit_end_time;
static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct taprio_sched *q = qdisc_priv(sch);

	queue = skb_get_queue_mapping(skb);

	child = q->qdiscs[queue];
	if (unlikely(!child))
		return qdisc_drop(skb, sch, to_free);

	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
		if (!is_valid_interval(skb, sch))
			return qdisc_drop(skb, sch, to_free);
	} else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
		skb->tstamp = get_packet_txtime(skb, sch);
		if (!skb->tstamp)
			return qdisc_drop(skb, sch, to_free);
	}

	qdisc_qstats_backlog_inc(sch, skb);

	return qdisc_enqueue(skb, child, to_free);
}
static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_entry *entry;

	entry = rcu_dereference(q->current_entry);
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);

		if (TXTIME_ASSIST_IS_ENABLED(q->flags))
			return skb;

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc)))
			continue;
static struct sk_buff *taprio_peek_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->peek(child);
static struct sk_buff *taprio_peek(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->peek(sch);
}
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
	atomic_set(&entry->budget,
		   div64_u64((u64)entry->interval * 1000,
			     atomic64_read(&q->picos_per_byte)));
}
static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sk_buff *skb = NULL;
	struct sched_entry *entry;

	entry = rcu_dereference(q->current_entry);
	/* if there's no entry, it means that the schedule didn't
	 * start yet, so force all gates to be open, this is in
	 * accordance with IEEE 802.1Qbv-2015 Section 8.6.9.4.5
	 */
	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
			skb = child->ops->dequeue(child);
		}

		skb = child->ops->peek(child);

		prio = skb->priority;
		tc = netdev_get_prio_tc_map(dev, prio);

		if (!(gate_mask & BIT(tc))) {
			continue;
		}

		len = qdisc_pkt_len(skb);
		guard = ktime_add_ns(taprio_get_time(q),
				     length_to_duration(q, len));

		/* In the case that there's no gate entry, there's no
		 * guard band.
		 */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    ktime_after(guard, entry->close_time)) {
			continue;
		}

		/* ... and no budget. */
		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
		    atomic_sub_return(len, &entry->budget) < 0) {
			continue;
		}

		skb = child->ops->dequeue(child);

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct Qdisc *child = q->qdiscs[i];

		if (unlikely(!child))
			continue;

		skb = child->ops->dequeue(child);

		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);

	return q->dequeue(sch);
}
static bool should_restart_cycle(const struct sched_gate_list *oper,
				 const struct sched_entry *entry)
{
	if (list_is_last(&entry->list, &oper->entries))
		return true;

	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
		return true;
static bool should_change_schedules(const struct sched_gate_list *admin,
				    const struct sched_gate_list *oper,
				    ktime_t close_time)
{
	ktime_t next_base_time, extension_time;

	next_base_time = sched_base_time(admin);

	/* This is the simple case, the close_time would fall after
	 * the next schedule base_time.
	 */
	if (ktime_compare(next_base_time, close_time) <= 0)
		return true;

	/* This is the cycle_time_extension case, if the close_time
	 * plus the amount that can be extended would fall after the
	 * next schedule base_time, we can extend the current schedule
	 * for that amount.
	 */
	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);

	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
	 * how precisely the extension should be made. So after
	 * conformance testing, this logic may change.
	 */
	if (ktime_compare(next_base_time, extension_time) <= 0)
		return true;

	return false;
}
static enum hrtimer_restart advance_sched(struct hrtimer *timer)
{
	struct taprio_sched *q = container_of(timer, struct taprio_sched,
					      advance_timer);
	struct sched_gate_list *oper, *admin;
	struct sched_entry *entry, *next;
	struct Qdisc *sch = q->root;

	spin_lock(&q->current_entry_lock);
	entry = rcu_dereference_protected(q->current_entry,
					  lockdep_is_held(&q->current_entry_lock));
	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	switch_schedules(q, &admin, &oper);

	/* This can happen in two cases: 1. this is the very first run
	 * of this function (i.e. we weren't running any schedule
	 * previously); 2. The previous schedule just ended. The first
	 * entry of all schedules is pre-calculated during the
	 * schedule initialization.
	 */
	if (unlikely(!entry || entry->close_time == oper->base_time)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		close_time = next->close_time;
	}

	if (should_restart_cycle(oper, entry)) {
		next = list_first_entry(&oper->entries, struct sched_entry,
					list);
		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
						      oper->cycle_time);
	} else {
		next = list_next_entry(entry, list);
	}

	close_time = ktime_add_ns(entry->close_time, next->interval);
	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);

	if (should_change_schedules(admin, oper, close_time)) {
		/* Set things so the next time this runs, the new
		 * schedule runs.
		 */
		close_time = sched_base_time(admin);
		switch_schedules(q, &admin, &oper);
	}

	next->close_time = close_time;
	taprio_set_budget(q, next);

	rcu_assign_pointer(q->current_entry, next);
	spin_unlock(&q->current_entry_lock);

	hrtimer_set_expires(&q->advance_timer, close_time);

	__netif_schedule(sch);

	return HRTIMER_RESTART;
}
static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
	[TCA_TAPRIO_SCHED_ENTRY_INDEX]	   = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_CMD]	   = { .type = NLA_U8 },
	[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK] = { .type = NLA_U32 },
	[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]  = { .type = NLA_U32 },
};

static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
	[TCA_TAPRIO_ATTR_PRIOMAP]		     = {
		.len = sizeof(struct tc_mqprio_qopt)
	},
	[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]	     = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]	     = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]	     = { .type = NLA_NESTED },
	[TCA_TAPRIO_ATTR_SCHED_CLOCKID]		     = { .type = NLA_S32 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]	     = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
	[TCA_TAPRIO_ATTR_FLAGS]			     = { .type = NLA_U32 },
	[TCA_TAPRIO_ATTR_TXTIME_DELAY]		     = { .type = NLA_U32 },
};
static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
			    struct netlink_ext_ack *extack)
{
	if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD])
		entry->command = nla_get_u8(
			tb[TCA_TAPRIO_SCHED_ENTRY_CMD]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK])
		entry->gate_mask = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_GATE_MASK]);

	if (tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL])
		interval = nla_get_u32(
			tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]);

	if (interval == 0) {
		NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry");
		return -EINVAL;
	}

	entry->interval = interval;
static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry,
			     int index, struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { };

	err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_SCHED_ENTRY_MAX, n,
					  entry_policy, NULL);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Could not parse nested entry");
		return -EINVAL;
	}

	entry->index = index;

	return fill_sched_entry(tb, entry, extack);
}
static int parse_sched_list(struct nlattr *list,
			    struct sched_gate_list *sched,
			    struct netlink_ext_ack *extack)
{
	nla_for_each_nested(n, list, rem) {
		struct sched_entry *entry;

		if (nla_type(n) != TCA_TAPRIO_SCHED_ENTRY) {
			NL_SET_ERR_MSG(extack, "Attribute is not of type 'entry'");
			continue;
		}

		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			NL_SET_ERR_MSG(extack, "Not enough memory for entry");
			return -ENOMEM;
		}

		err = parse_sched_entry(n, entry, i, extack);

		list_add_tail(&entry->list, &sched->entries);
	}

	sched->num_entries = i;
static int parse_taprio_schedule(struct nlattr **tb,
				 struct sched_gate_list *new,
				 struct netlink_ext_ack *extack)
{
	if (tb[TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY]) {
		NL_SET_ERR_MSG(extack, "Adding a single entry is not supported");
	}

	if (tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME])
		new->base_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_BASE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION])
		new->cycle_time_extension = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME])
		new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]);

	if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST])
		err = parse_sched_list(
			tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack);

	if (!new->cycle_time) {
		struct sched_entry *entry;

		list_for_each_entry(entry, &new->entries, list)
			cycle = ktime_add_ns(cycle, entry->interval);
		new->cycle_time = cycle;
	}
static int taprio_parse_mqprio_opt(struct net_device *dev,
				   struct tc_mqprio_qopt *qopt,
				   struct netlink_ext_ack *extack,
				   u32 taprio_flags)
{
	if (!qopt && !dev->num_tc) {
		NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
		return -EINVAL;
	}

	/* If num_tc is already set, it means that the user already
	 * configured the mqprio part
	 */

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is outside valid range");
		return -EINVAL;
	}

	/* taprio imposes that traffic classes map 1:n to tx queues */
	if (qopt->num_tc > dev->num_tx_queues) {
		NL_SET_ERR_MSG(extack, "Number of traffic classes is greater than number of HW queues");
		return -EINVAL;
	}

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i <= TC_BITMASK; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
			return -EINVAL;
		}
	}

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in tx range; being equal to
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->num_tx_queues ||
		    last > dev->real_num_tx_queues) {
			NL_SET_ERR_MSG(extack, "Invalid queue in traffic class to queue mapping");
			return -EINVAL;
		}

		if (TXTIME_ASSIST_IS_ENABLED(taprio_flags))
			continue;

		/* Verify that the offset and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j]) {
				NL_SET_ERR_MSG(extack, "Detected overlap in the traffic class to queue mapping");
				return -EINVAL;
			}
		}
	}
static int taprio_get_start_time(struct Qdisc *sch,
				 struct sched_gate_list *sched,
				 ktime_t *start)
{
	struct taprio_sched *q = qdisc_priv(sch);
	ktime_t now, base, cycle;

	base = sched_base_time(sched);
	now = taprio_get_time(q);

	if (ktime_after(base, now)) {
		*start = base;
		return 0;
	}

	cycle = sched->cycle_time;

	/* The qdisc is expected to have at least one sched_entry. Moreover,
	 * any entry must have 'interval' > 0. Thus if the cycle time is zero,
	 * something went really wrong. In that case, we should warn about this
	 * inconsistent state and return error.
	 */

	/* Schedule the start time for the beginning of the next
	 * cycle.
	 */
	n = div64_s64(ktime_sub_ns(now, base), cycle);
	*start = ktime_add_ns(base, (n + 1) * cycle);
	return 0;
}
static void setup_first_close_time(struct taprio_sched *q,
				   struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *first;

	first = list_first_entry(&sched->entries,
				 struct sched_entry, list);

	cycle = sched->cycle_time;

	/* FIXME: find a better place to do this */
	sched->cycle_close_time = ktime_add_ns(base, cycle);

	first->close_time = ktime_add_ns(base, first->interval);
	taprio_set_budget(q, first);
	rcu_assign_pointer(q->current_entry, NULL);
}
static void taprio_start_sched(struct Qdisc *sch,
			       ktime_t start, struct sched_gate_list *new)
{
	struct taprio_sched *q = qdisc_priv(sch);

	if (FULL_OFFLOAD_IS_ENABLED(q->flags))
		return;

	expires = hrtimer_get_expires(&q->advance_timer);
	if (expires == 0)
		expires = KTIME_MAX;

	/* If the new schedule starts before the next expiration, we
	 * reprogram it to the earliest one, so we change the admin
	 * schedule to the operational one at the right time.
	 */
	start = min_t(ktime_t, start, expires);

	hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS);
}
static void taprio_set_picos_per_byte(struct net_device *dev,
				      struct taprio_sched *q)
{
	struct ethtool_link_ksettings ecmd;
	int speed = SPEED_10;

	err = __ethtool_get_link_ksettings(dev, &ecmd);

	if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
		speed = ecmd.base.speed;

	picos_per_byte = (USEC_PER_SEC * 8) / speed;

	atomic64_set(&q->picos_per_byte, picos_per_byte);
	netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
		   dev->name, (long long)atomic64_read(&q->picos_per_byte),
		   ecmd.base.speed);
}
static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
			       void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *qdev;
	struct taprio_sched *q;

	if (event != NETDEV_UP && event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	spin_lock(&taprio_list_lock);
	list_for_each_entry(q, &taprio_list, taprio_list) {
		qdev = qdisc_dev(q->root);
	}
	spin_unlock(&taprio_list_lock);

	taprio_set_picos_per_byte(dev, q);
static void setup_txtime(struct taprio_sched *q,
			 struct sched_gate_list *sched, ktime_t base)
{
	struct sched_entry *entry;

	list_for_each_entry(entry, &sched->entries, list) {
		entry->next_txtime = ktime_add_ns(base, interval);
		interval += entry->interval;
	}
}
static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
{
	size_t size = sizeof(struct tc_taprio_sched_entry) * num_entries +
		      sizeof(struct __tc_taprio_qopt_offload);
	struct __tc_taprio_qopt_offload *__offload;

	__offload = kzalloc(size, GFP_KERNEL);

	refcount_set(&__offload->users, 1);

	return &__offload->offload;
}
struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload
						  *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	refcount_inc(&__offload->users);

	return offload;
}
EXPORT_SYMBOL_GPL(taprio_offload_get);
void taprio_offload_free(struct tc_taprio_qopt_offload *offload)
{
	struct __tc_taprio_qopt_offload *__offload;

	__offload = container_of(offload, struct __tc_taprio_qopt_offload,
				 offload);

	if (!refcount_dec_and_test(&__offload->users))
		return;

	kfree(__offload);
}
EXPORT_SYMBOL_GPL(taprio_offload_free);
/* The function will only serve to keep the pointers to the "oper" and "admin"
 * schedules valid in relation to their base times, so when calling dump() the
 * user looks at the right schedules.
 * When using full offload, the admin configuration is promoted to oper at the
 * base_time in the PHC time domain. But because the system time is not
 * necessarily in sync with that, we can't just trigger a hrtimer to call
 * switch_schedules at the right hardware time.
 * At the moment we call this by hand right away from taprio, but in the future
 * it will be useful to create a mechanism for drivers to notify taprio of the
 * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
 * This is left as TODO.
 */
static void taprio_offload_config_changed(struct taprio_sched *q)
{
	struct sched_gate_list *oper, *admin;

	spin_lock(&q->current_entry_lock);

	oper = rcu_dereference_protected(q->oper_sched,
					 lockdep_is_held(&q->current_entry_lock));
	admin = rcu_dereference_protected(q->admin_sched,
					  lockdep_is_held(&q->current_entry_lock));

	switch_schedules(q, &admin, &oper);

	spin_unlock(&q->current_entry_lock);
}
static void taprio_sched_to_offload(struct taprio_sched *q,
				    struct sched_gate_list *sched,
				    const struct tc_mqprio_qopt *mqprio,
				    struct tc_taprio_qopt_offload *offload)
{
	struct sched_entry *entry;

	offload->base_time = sched->base_time;
	offload->cycle_time = sched->cycle_time;
	offload->cycle_time_extension = sched->cycle_time_extension;

	list_for_each_entry(entry, &sched->entries, list) {
		struct tc_taprio_sched_entry *e = &offload->entries[i];

		e->command = entry->command;
		e->interval = entry->interval;
		e->gate_mask = entry->gate_mask;
	}

	offload->num_entries = i;
static int taprio_enable_offload(struct net_device *dev,
				 struct tc_mqprio_qopt *mqprio,
				 struct taprio_sched *q,
				 struct sched_gate_list *sched,
				 struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;

	if (!ops->ndo_setup_tc) {
		NL_SET_ERR_MSG(extack,
			       "Device does not support taprio offload");
		return -EOPNOTSUPP;
	}

	offload = taprio_offload_alloc(sched->num_entries);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory for enabling offload mode");
		return -ENOMEM;
	}
	offload->enable = 1;
	taprio_sched_to_offload(q, sched, mqprio, offload);

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to setup taprio offload");
	}

	taprio_offload_free(offload);
static int taprio_disable_offload(struct net_device *dev,
				  struct taprio_sched *q,
				  struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct tc_taprio_qopt_offload *offload;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags))
		return 0;

	if (!ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	offload = taprio_offload_alloc(0);
	if (!offload) {
		NL_SET_ERR_MSG(extack,
			       "Not enough memory to disable offload mode");
		return -ENOMEM;
	}
	offload->enable = 0;

	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
	if (err < 0) {
		NL_SET_ERR_MSG(extack,
			       "Device failed to disable offload");
	}

	taprio_offload_free(offload);
/* If full offload is enabled, the only possible clockid is the net device's
 * PHC. For that reason, specifying a clockid through netlink is incorrect.
 * For txtime-assist, it is implicitly assumed that the device's PHC is kept
 * in sync with the specified clockid via a user space daemon such as phc2sys.
 * For both software taprio and txtime-assist, the clockid is used for the
 * hrtimer that advances the schedule and is hence mandatory.
 */
static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
		const struct ethtool_ops *ops = dev->ethtool_ops;
		struct ethtool_ts_info info = {
			.cmd = ETHTOOL_GET_TS_INFO,
		};

		if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
			NL_SET_ERR_MSG(extack,
				       "The 'clockid' cannot be specified for full offload");
		}

		if (ops && ops->get_ts_info)
			err = ops->get_ts_info(dev, &info);

		if (err || info.phc_index < 0) {
			NL_SET_ERR_MSG(extack,
				       "Device does not have a PTP clock");
		}
	} else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) {
		int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]);

		/* We only support static clockids and we don't allow
		 * for it to be modified after the first init.
		 */
		if (clockid < 0 ||
		    (q->clockid != -1 && q->clockid != clockid)) {
			NL_SET_ERR_MSG(extack,
				       "Changing the 'clockid' of a running schedule is not supported");
		}

		switch (clockid) {
		case CLOCK_REALTIME:
			q->tk_offset = TK_OFFS_REAL;
			break;
		case CLOCK_MONOTONIC:
			q->tk_offset = TK_OFFS_MAX;
			break;
		case CLOCK_BOOTTIME:
			q->tk_offset = TK_OFFS_BOOT;
			break;
		case CLOCK_TAI:
			q->tk_offset = TK_OFFS_TAI;
			break;
		default:
			NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
		}

		q->clockid = clockid;
	} else {
		NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
	}

	/* Everything went ok, return success. */
static int taprio_mqprio_cmp(const struct net_device *dev,
			     const struct tc_mqprio_qopt *mqprio)
{
	if (!mqprio || mqprio->num_tc != dev->num_tc)
		return -1;

	for (i = 0; i < mqprio->num_tc; i++)
		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
		    dev->tc_to_txq[i].offset != mqprio->offset[i])
			return -1;

	for (i = 0; i <= TC_BITMASK; i++)
		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
			return -1;
/* The semantics of the 'flags' argument in relation to 'change()'
 * requests are interpreted following two rules (which are applied in
 * this order): (1) an omitted 'flags' argument is interpreted as
 * zero; (2) the 'flags' of a "running" taprio instance cannot be
 * changed.
 */
static int taprio_new_flags(const struct nlattr *attr, u32 old,
			    struct netlink_ext_ack *extack)
{
	new = nla_get_u32(attr);

	if (old != TAPRIO_FLAGS_INVALID && old != new) {
		NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
		return -EOPNOTSUPP;
	}

	if (!taprio_flags_valid(new)) {
		NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
		return -EINVAL;
	}
*sch
, struct nlattr
*opt
,
1407 struct netlink_ext_ack
*extack
)
1409 struct nlattr
*tb
[TCA_TAPRIO_ATTR_MAX
+ 1] = { };
1410 struct sched_gate_list
*oper
, *admin
, *new_admin
;
1411 struct taprio_sched
*q
= qdisc_priv(sch
);
1412 struct net_device
*dev
= qdisc_dev(sch
);
1413 struct tc_mqprio_qopt
*mqprio
= NULL
;
1414 unsigned long flags
;
1418 err
= nla_parse_nested_deprecated(tb
, TCA_TAPRIO_ATTR_MAX
, opt
,
1419 taprio_policy
, extack
);
1423 if (tb
[TCA_TAPRIO_ATTR_PRIOMAP
])
1424 mqprio
= nla_data(tb
[TCA_TAPRIO_ATTR_PRIOMAP
]);
1426 err
= taprio_new_flags(tb
[TCA_TAPRIO_ATTR_FLAGS
],
1433 err
= taprio_parse_mqprio_opt(dev
, mqprio
, extack
, q
->flags
);
1437 new_admin
= kzalloc(sizeof(*new_admin
), GFP_KERNEL
);
1439 NL_SET_ERR_MSG(extack
, "Not enough memory for a new schedule");
1442 INIT_LIST_HEAD(&new_admin
->entries
);
1445 oper
= rcu_dereference(q
->oper_sched
);
1446 admin
= rcu_dereference(q
->admin_sched
);
1449 /* no changes - no new mqprio settings */
1450 if (!taprio_mqprio_cmp(dev
, mqprio
))
1453 if (mqprio
&& (oper
|| admin
)) {
1454 NL_SET_ERR_MSG(extack
, "Changing the traffic mapping of a running schedule is not supported");
1459 err
= parse_taprio_schedule(tb
, new_admin
, extack
);
1463 if (new_admin
->num_entries
== 0) {
1464 NL_SET_ERR_MSG(extack
, "There should be at least one entry in the schedule");
1469 err
= taprio_parse_clockid(sch
, tb
, extack
);
1473 taprio_set_picos_per_byte(dev
, q
);
1476 netdev_set_num_tc(dev
, mqprio
->num_tc
);
1477 for (i
= 0; i
< mqprio
->num_tc
; i
++)
1478 netdev_set_tc_queue(dev
, i
,
1482 /* Always use supplied priority mappings */
1483 for (i
= 0; i
<= TC_BITMASK
; i
++)
1484 netdev_set_prio_tc_map(dev
, i
,
1485 mqprio
->prio_tc_map
[i
]);
1488 if (FULL_OFFLOAD_IS_ENABLED(q
->flags
))
1489 err
= taprio_enable_offload(dev
, mqprio
, q
, new_admin
, extack
);
1491 err
= taprio_disable_offload(dev
, q
, extack
);
1495 /* Protects against enqueue()/dequeue() */
1496 spin_lock_bh(qdisc_lock(sch
));
1498 if (tb
[TCA_TAPRIO_ATTR_TXTIME_DELAY
]) {
1499 if (!TXTIME_ASSIST_IS_ENABLED(q
->flags
)) {
1500 NL_SET_ERR_MSG_MOD(extack
, "txtime-delay can only be set when txtime-assist mode is enabled");
1505 q
->txtime_delay
= nla_get_u32(tb
[TCA_TAPRIO_ATTR_TXTIME_DELAY
]);
1508 if (!TXTIME_ASSIST_IS_ENABLED(q
->flags
) &&
1509 !FULL_OFFLOAD_IS_ENABLED(q
->flags
) &&
1510 !hrtimer_active(&q
->advance_timer
)) {
1511 hrtimer_init(&q
->advance_timer
, q
->clockid
, HRTIMER_MODE_ABS
);
1512 q
->advance_timer
.function
= advance_sched
;
1515 if (FULL_OFFLOAD_IS_ENABLED(q
->flags
)) {
1516 q
->dequeue
= taprio_dequeue_offload
;
1517 q
->peek
= taprio_peek_offload
;
1519 /* Be sure to always keep the function pointers
1520 * in a consistent state.
1522 q
->dequeue
= taprio_dequeue_soft
;
1523 q
->peek
= taprio_peek_soft
;
1526 err
= taprio_get_start_time(sch
, new_admin
, &start
);
1528 NL_SET_ERR_MSG(extack
, "Internal error: failed get start time");
1532 setup_txtime(q
, new_admin
, start
);
1534 if (TXTIME_ASSIST_IS_ENABLED(q
->flags
)) {
1536 rcu_assign_pointer(q
->oper_sched
, new_admin
);
1542 rcu_assign_pointer(q
->admin_sched
, new_admin
);
1544 call_rcu(&admin
->rcu
, taprio_free_sched_cb
);
1546 setup_first_close_time(q
, new_admin
, start
);
1548 /* Protects against advance_sched() */
1549 spin_lock_irqsave(&q
->current_entry_lock
, flags
);
1551 taprio_start_sched(sch
, start
, new_admin
);
1553 rcu_assign_pointer(q
->admin_sched
, new_admin
);
1555 call_rcu(&admin
->rcu
, taprio_free_sched_cb
);
1557 spin_unlock_irqrestore(&q
->current_entry_lock
, flags
);
1559 if (FULL_OFFLOAD_IS_ENABLED(q
->flags
))
1560 taprio_offload_config_changed(q
);
1567 spin_unlock_bh(qdisc_lock(sch
));
1571 call_rcu(&new_admin
->rcu
, taprio_free_sched_cb
);
static void taprio_destroy(struct Qdisc *sch)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	spin_lock(&taprio_list_lock);
	list_del(&q->taprio_list);
	spin_unlock(&taprio_list_lock);

	hrtimer_cancel(&q->advance_timer);

	taprio_disable_offload(dev, q, NULL);

	for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
		qdisc_put(q->qdiscs[i]);

	netdev_reset_tc(dev);

	if (q->oper_sched)
		call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);

	if (q->admin_sched)
		call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb);
}
static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	spin_lock_init(&q->current_entry_lock);

	hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
	q->advance_timer.function = advance_sched;

	q->dequeue = taprio_dequeue_soft;
	q->peek = taprio_peek_soft;

	/* We only support static clockids. Use an invalid value as default
	 * and get the valid one on taprio_change().
	 */
	q->flags = TAPRIO_FLAGS_INVALID;

	spin_lock(&taprio_list_lock);
	list_add(&q->taprio_list, &taprio_list);
	spin_unlock(&taprio_list_lock);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdisc, attachment can't fail */
	q->qdiscs = kcalloc(dev->num_tx_queues,
			    sizeof(q->qdiscs[0]),
			    GFP_KERNEL);

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		struct Qdisc *qdisc;

		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  &pfifo_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)),
					  extack);

		if (i < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);

		q->qdiscs[i] = qdisc;
	}

	return taprio_change(sch, opt, extack);
}
static struct netdev_queue *taprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;

	return netdev_get_tx_queue(dev, ntx);
}
static int taprio_graft(struct Qdisc *sch, unsigned long cl,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = q->qdiscs[cl - 1];
	q->qdiscs[cl - 1] = new;

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);
*msg
,
1712 const struct sched_entry
*entry
)
1714 struct nlattr
*item
;
1716 item
= nla_nest_start_noflag(msg
, TCA_TAPRIO_SCHED_ENTRY
);
1720 if (nla_put_u32(msg
, TCA_TAPRIO_SCHED_ENTRY_INDEX
, entry
->index
))
1721 goto nla_put_failure
;
1723 if (nla_put_u8(msg
, TCA_TAPRIO_SCHED_ENTRY_CMD
, entry
->command
))
1724 goto nla_put_failure
;
1726 if (nla_put_u32(msg
, TCA_TAPRIO_SCHED_ENTRY_GATE_MASK
,
1728 goto nla_put_failure
;
1730 if (nla_put_u32(msg
, TCA_TAPRIO_SCHED_ENTRY_INTERVAL
,
1732 goto nla_put_failure
;
1734 return nla_nest_end(msg
, item
);
1737 nla_nest_cancel(msg
, item
);
static int dump_schedule(struct sk_buff *msg,
			 const struct sched_gate_list *root)
{
	struct nlattr *entry_list;
	struct sched_entry *entry;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_BASE_TIME,
			root->base_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME,
			root->cycle_time, TCA_TAPRIO_PAD))
		return -1;

	if (nla_put_s64(msg, TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION,
			root->cycle_time_extension, TCA_TAPRIO_PAD))
		return -1;

	entry_list = nla_nest_start_noflag(msg,
					   TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST);

	list_for_each_entry(entry, &root->entries, list) {
		if (dump_entry(msg, entry) < 0)
			goto error_nest;
	}

	nla_nest_end(msg, entry_list);
	return 0;

error_nest:
	nla_nest_cancel(msg, entry_list);
static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct taprio_sched *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct sched_gate_list *oper, *admin;
	struct tc_mqprio_qopt opt = { 0 };
	struct nlattr *nest, *sched_nest;

	oper = rcu_dereference(q->oper_sched);
	admin = rcu_dereference(q->admin_sched);

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);

	if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt))
		goto options_error;

	if (!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
	    nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid))
		goto options_error;

	if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags))
		goto options_error;

	if (q->txtime_delay &&
	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
		goto options_error;

	if (oper && dump_schedule(skb, oper))
		goto options_error;

	sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);

	if (dump_schedule(skb, admin))
		goto admin_error;

	nla_nest_end(skb, sched_nest);

	return nla_nest_end(skb, nest);

admin_error:
	nla_nest_cancel(skb, sched_nest);

options_error:
	nla_nest_cancel(skb, nest);
static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!taprio_queue_get(sch, ntx))
		return 0;

	return ntx;
}

static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;

	return 0;
}

static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;

	return 0;
}
static void taprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
static struct netdev_queue *taprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return taprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
static const struct Qdisc_class_ops taprio_class_ops = {
	.graft		= taprio_graft,
	.leaf		= taprio_leaf,
	.find		= taprio_find,
	.walk		= taprio_walk,
	.dump		= taprio_dump_class,
	.dump_stats	= taprio_dump_class_stats,
	.select_queue	= taprio_select_queue,
};
static struct Qdisc_ops taprio_qdisc_ops __read_mostly = {
	.cl_ops		= &taprio_class_ops,
	.priv_size	= sizeof(struct taprio_sched),
	.init		= taprio_init,
	.change		= taprio_change,
	.destroy	= taprio_destroy,
	.peek		= taprio_peek,
	.dequeue	= taprio_dequeue,
	.enqueue	= taprio_enqueue,
	.dump		= taprio_dump,
	.owner		= THIS_MODULE,
};
static struct notifier_block taprio_device_notifier = {
	.notifier_call = taprio_dev_notifier,
};

static int __init taprio_module_init(void)
{
	int err = register_netdevice_notifier(&taprio_device_notifier);

	if (err)
		return err;

	return register_qdisc(&taprio_qdisc_ops);
}

static void __exit taprio_module_exit(void)
{
	unregister_qdisc(&taprio_qdisc_ops);
	unregister_netdevice_notifier(&taprio_device_notifier);
}

module_init(taprio_module_init);
module_exit(taprio_module_exit);
MODULE_LICENSE("GPL");