/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event);
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in the order and at the times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some sanity
   checks and the part of the work that is common to all qdiscs,
   and to provide rtnetlink notifications.

   All the real intelligent work is done inside the qdisc modules.


   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything at this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not a
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore
   NET_XMIT_POLICED	- dropped by the policer.
     Expected action: back off or report an error to real-time apps.

   Auxiliary routines:

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns the qdisc to its initial state: purge all buffers, clear all
   timers, counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys the resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
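/*
 * Illustrative sketch (not part of this file): a hypothetical FIFO-style
 * qdisc showing how the enqueue/dequeue/peek hooks described above fit
 * together.  The helpers used come from include/net/sch_generic.h of this
 * kernel generation; the "example_fifo" qdisc itself is made up and the
 * block is disabled so it does not alter this file's behaviour.
 */
#if 0
static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* Accept while below the device's tx_queue_len, otherwise drop. */
	if (likely(skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS */

	return qdisc_drop(skb, sch);			/* NET_XMIT_DROP */
}

static struct sk_buff *example_fifo_dequeue(struct Qdisc *sch)
{
	/* NULL only means "nothing to send now"; emptiness is q.qlen == 0. */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_fifo_qdisc_ops __read_mostly = {
	.id		= "example_fifo",
	.priv_size	= 0,
	.enqueue	= example_fifo_enqueue,
	.dequeue	= example_fifo_dequeue,
	.peek		= qdisc_peek_head,
	.owner		= THIS_MODULE,
};
#endif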
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);
/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;
/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}
static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}
/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}
/* We know handle. Find qdisc among all qdiscs attached to the device
 * (root qdisc, all its children, children of children etc.)
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	list_for_each_entry(q, &root->list, list) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}
void qdisc_list_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		struct Qdisc *root = qdisc_dev(q)->qdisc;

		WARN_ON_ONCE(root == &noop_qdisc);
		list_add_tail(&q->list, &root->list);
	}
}
EXPORT_SYMBOL(qdisc_list_add);
void qdisc_list_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
		list_del(&q->list);
}
EXPORT_SYMBOL(qdisc_list_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}
/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}
/* The linklayer setting was not transferred from iproute2, in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utils, we detect the linklayer setting by checking whether the rate
 * table was modified.
 *
 * For linklayer ATM, the rate table entries are aligned to 48-byte
 * cells, so some table entries contain the same value. The mpu
 * (min packet unit) is also encoded into the old rate table, so
 * starting from the mpu we find the low and high table entries that
 * map to this cell. If these entries contain the same value, then the
 * rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing the two.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
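/*
 * Worked example (illustrative numbers only, not taken from this file):
 * with mpu = 0 and cell_log = 3 (8-byte cells), low = roundup(0, 48) = 0,
 * high = roundup(1, 48) = 48, so cell_low = 0 and cell_high = 48/8 - 1 = 5.
 * Slots 0..5 all describe packet sizes that fit in a single 48-byte ATM
 * cell, so an ATM-adjusted rate table stores the same cost in rtab[0] and
 * rtab[5] and the function reports TC_LINKLAYER_ATM; an unmodified
 * (Ethernet) table normally differs and falls through to
 * TC_LINKLAYER_ETHERNET.
 */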
static struct qdisc_rate_table *qdisc_rtab_list;
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);
void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);
static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};
static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}
static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}
void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}
void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
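/*
 * Worked example (hypothetical size table, for illustration only): with
 * overhead = 0, cell_align = 0, cell_log = 6 (64-byte cells), size_log = 0
 * and tsize = 256, a 1000-byte skb maps to slot = 1000 >> 6 = 15, so the
 * qdisc is charged stab->data[15] bytes instead of the raw skb->len.
 * Packets longer than 256 * 64 bytes wrap around using the
 * data[tsize - 1] * (slot / tsize) + data[slot % tsize] formula above.
 */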
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	qdisc_unthrottled(wd->qdisc);
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (throttle)
		qdisc_throttled(wd->qdisc);

	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
	qdisc_unthrottled(wd->qdisc);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
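/*
 * Usage sketch (not part of this file): a rate-limiting qdisc typically
 * embeds a struct qdisc_watchdog in its private data, arms it from dequeue
 * when nothing may be sent yet, and cancels it on reset.  The "example"
 * qdisc, its private struct and the next_send_time variable below are
 * hypothetical; only the watchdog helpers are the ones defined above.
 */
#if 0
struct example_sched_data {
	struct qdisc_watchdog watchdog;
	/* ... */
};

static int example_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct example_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	return 0;
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	u64 next_send_time = 0;	/* computed by the scheduling algorithm */

	if (ktime_to_ns(ktime_get()) < next_send_time) {
		/* Nothing may be sent yet: wake up when the budget allows. */
		qdisc_watchdog_schedule_ns(&q->watchdog, next_send_time, true);
		return NULL;
	}
	return qdisc_dequeue_head(sch);
}

static void example_reset(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
}
#endif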
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}
static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);
);
683 int qdisc_class_hash_init(struct Qdisc_class_hash
*clhash
)
685 unsigned int size
= 4;
687 clhash
->hash
= qdisc_class_hash_alloc(size
);
688 if (clhash
->hash
== NULL
)
690 clhash
->hashsize
= size
;
691 clhash
->hashmask
= size
- 1;
692 clhash
->hashelems
= 0;
695 EXPORT_SYMBOL(qdisc_class_hash_init
);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);
void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);
void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
/* Allocate a unique handle from the space managed by the kernel.
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while (--i > 0);

	return 0;
}
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	int drops;

	if (n == 0)
		return;
	drops = max_t(int, n, 0);
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			return;

		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON(parentid != TC_H_ROOT);
			return;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
		__qdisc_qstats_drop(sch, drops);
	}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				atomic_inc(&new->refcnt);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}
/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */
static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
	     struct Qdisc *p, u32 parent, u32 handle,
	     struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;

			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
			if (!sch->cpu_qstats)
				goto err_out4;
		}

		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			spinlock_t *root_lock;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				root_lock = qdisc_root_sleeping_lock(sch);
			else
				root_lock = qdisc_lock(sch);

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						root_lock,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_list_add(sch);

		return sch;
	}
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      qdisc_root_sleeping_lock(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}
struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}
static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}
/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know that some child q is already
				 *   attached to this parent and have a choice:
				 *   either to change it or to create/graft a new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, the requestor wanted to say
				 *   that qdisc tcm_handle is not expected
				 *   to exist, so we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is a sort of hole in the API; we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft if the
				 *   user gave a KIND which does not match the existing one.
				 */
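				/* For example (typical iproute2 behaviour, given
				 * here only as an illustration): "tc qdisc add"
				 * sends NLM_F_CREATE|NLM_F_EXCL, "tc qdisc replace"
				 * sends NLM_F_CREATE|NLM_F_REPLACE, and
				 * "tc qdisc change" sends neither, so only the
				 * first two can fall through to create_n_graft here.
				 */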
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static bool tc_qdisc_dump_ignore(struct Qdisc *q)
{
	return (q->flags & TCQ_F_BUILTIN) ? true : false;
}
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}
	list_for_each_entry(q, &root->list, list) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();
	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
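	/* Illustrative encoding (not from the original comment): a handle is a
	 * 32-bit major:minor pair, e.g. "1:10" is TC_H_MAKE(0x10000, 0x10) ==
	 * 0x00010010; TC_H_MAJ() keeps the upper 16 bits (0x00010000) and
	 * TC_H_MIN() the lower ones (0x10). The steps below use this to turn
	 * a partially specified parent/handle pair into a full qdisc id X:0
	 * plus class id X:Y.
	 */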
	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 qdisc_root_sleeping_lock(q), &d) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};
static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
}
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}
static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	list_for_each_entry(q, &root->list, list) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	__be16 protocol = tc_skb_protocol(skb);
	int err;

	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;
		err = tp->classify(skb, tp, res);

		if (err >= 0)
			return err;
	}
	return -1;
}
EXPORT_SYMBOL(tc_classify_compat);
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		struct tcf_result *res)
{
	int err = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tcf_proto *otp = tp;
	int limit = 0;
reclassify:
#endif

	err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
	if (err == TC_ACT_RECLASSIFY) {
		tp = otp;

		if (unlikely(limit++ >= MAX_REC_LOOP)) {
			net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
					       tp->q->ops->id,
					       tp->prio & 0xffff,
					       ntohs(tp->protocol));
			return TC_ACT_SHOT;
		}

		goto reclassify;
	}
#endif
	return err;
}
EXPORT_SYMBOL(tc_classify);
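/*
 * Usage sketch (not part of this file): a classful qdisc normally calls
 * tc_classify() from its enqueue path to map an skb to one of its classes.
 * The private struct, filter_list field and example_find_class() helper
 * below are hypothetical placeholders; only tc_classify() and
 * struct tcf_result are real.
 */
#if 0
static struct example_class *example_classify(struct sk_buff *skb,
					      struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	struct tcf_result res;
	int result;

	result = tc_classify(skb, fl, &res);
	if (result < 0)
		return NULL;			/* no filter matched */

	/* res.classid now holds the selected class, e.g. 1:10 */
	return example_find_class(sch, res.classid);
}
#endif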
bool tcf_destroy(struct tcf_proto *tp, bool force)
{
	if (tp->ops->destroy(tp, force)) {
		module_put(tp->ops->owner);
		kfree_rcu(tp, rcu);
		return true;
	}

	return false;
}
void tcf_destroy_chain(struct tcf_proto __rcu **fl)
{
	struct tcf_proto *tp;

	while ((tp = rtnl_dereference(*fl)) != NULL) {
		RCU_INIT_POINTER(*fl, tp->next);
		tcf_destroy(tp, true);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif
static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};
static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);

	return 0;
}

subsys_initcall(pktsched_init);