/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event);
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in an order and at times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (look at cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to make some sanity
   checks and do the part of the work which is common to all qdiscs,
   and to provide rtnetlink notifications.

   All the real intelligent work is done inside qdisc modules.


   Every discipline has two major routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but it does not mean that the queue is empty, it just means that
   the discipline does not want to send anything this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not
   a real packet queue, but q->q.qlen must still be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns qdisc to initial state: purge all buffers, clear all
   timers, counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
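/* Illustrative sketch (not part of this file): the smallest shape the
 * enqueue/dequeue contract above can take. The function names here are
 * hypothetical; sch_fifo.c contains a real minimal qdisc of this form.
 */
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < sch->limit))	/* limit set up in ->init() */
		return qdisc_enqueue_tail(skb, sch);	/* NET_XMIT_SUCCESS */

	return qdisc_drop(skb, sch, to_free);	/* NET_XMIT_DROP */
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* Returning NULL only means "nothing to send right now";
	 * the queue is really empty iff sch->q.qlen == 0.
	 */
	return qdisc_dequeue_head(sch);
}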
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;
/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}
static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}
/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}
/* We know handle. Find qdisc among all qdisc's attached to device
 * (root qdisc, all its children, children of children etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}
void qdisc_hash_add(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}
/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}
/* The linklayer setting was not transferred from older iproute2
 * versions, and the rate-table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utilities, we detect the linklayer setting by checking whether the
 * rate table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value. The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find the low and high table entries for
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, computing the table entry one below,
 * and comparing.
 */
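/* Worked example (illustrative numbers, not taken from this file): with
 * r->mpu = 96 and r->cell_log = 3, low = roundup(96, 48) = 96 and
 * high = roundup(97, 48) = 144, giving cell_low = 96 >> 3 = 12 and
 * cell_high = (144 >> 3) - 1 = 17. On an ATM-aligned table, packet
 * sizes covered by both indexes cost the same number of 48-byte cells,
 * so rtab[12] == rtab[17]; on a plain Ethernet table they differ.
 */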
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);
void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
static LIST_HEAD(qdisc_stab_list);
static DEFINE_SPINLOCK(qdisc_stab_lock);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	spin_lock(&qdisc_stab_lock);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		spin_unlock(&qdisc_stab_lock);
		return stab;
	}

	spin_unlock(&qdisc_stab_lock);

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	spin_lock(&qdisc_stab_lock);
	list_add_tail(&stab->list, &qdisc_stab_list);
	spin_unlock(&qdisc_stab_lock);

	return stab;
}
static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	spin_lock(&qdisc_stab_lock);

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}

	spin_unlock(&qdisc_stab_lock);
}
EXPORT_SYMBOL(qdisc_put_stab);
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (wd->last_expires == expires)
		return;

	wd->last_expires = expires;
	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);
int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);
void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
/* Allocate a unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
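/* For illustration: autohandle below starts at 8000:0 and is bumped
 * before each probe, so the first handle handed out on a device is
 * 8001:0, then 8002:0, and so on, wrapping before TC_H_ROOT.
 */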
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while	(--i > 0);

	return 0;
}
void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
			       unsigned int len)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				atomic_inc(&new->refcnt);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);

			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else {
				err = -ENOENT;
			}
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}
/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *p, u32 parent, u32 handle,
				  struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];

		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;

			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
			if (!sch->cpu_qstats)
				goto err_out4;
		}

		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			seqcount_t *running;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				running = qdisc_root_sleeping_running(sch);
			else
				running = &sch->running;

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						NULL,
						running,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_hash_add(sch);

		return sch;
	}
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      qdisc_root_sleeping_running(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}
{
1075 struct qdisc_walker w
;
1080 static int check_loop_fn(struct Qdisc
*q
, unsigned long cl
,
1081 struct qdisc_walker
*w
);
1083 static int check_loop(struct Qdisc
*q
, struct Qdisc
*p
, int depth
)
1085 struct check_loop_arg arg
;
1087 if (q
->ops
->cl_ops
== NULL
)
1090 arg
.w
.stop
= arg
.w
.skip
= arg
.w
.count
= 0;
1091 arg
.w
.fn
= check_loop_fn
;
1094 q
->ops
->cl_ops
->walk(q
, &arg
.w
);
1095 return arg
.w
.stop
? -ELOOP
: 0;
1099 check_loop_fn(struct Qdisc
*q
, unsigned long cl
, struct qdisc_walker
*w
)
1102 const struct Qdisc_class_ops
*cops
= q
->ops
->cl_ops
;
1103 struct check_loop_arg
*arg
= (struct check_loop_arg
*)w
;
1105 leaf
= cops
->leaf(q
, cl
);
1107 if (leaf
== arg
->p
|| arg
->depth
> 7)
1109 return check_loop(leaf
, arg
->p
, arg
->depth
+ 1);
static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know that some child q is already
				 *   attached to this parent and have a choice:
				 *   either to change it or to create/graft a
				 *   new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if both CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, the requestor wanted to
				 *   say that qdisc tcm_handle is not expected
				 *   to exist, so we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is sort of a hole in the API: we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft if the
				 *   user gave a KIND which does not match the
				 *   existing one.
				 */
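				/* Concretely (observed iproute2 behaviour,
				 * stated here only for illustration):
				 * "tc qdisc add" sends NLM_F_CREATE|NLM_F_EXCL,
				 * "tc qdisc replace" sends
				 * NLM_F_CREATE|NLM_F_REPLACE, and
				 * "tc qdisc change" sends no flags, so only a
				 * replace (or a mismatching KIND) takes the
				 * create_n_graft path below.
				 */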
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
				  &d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static bool tc_qdisc_dump_ignore(struct Qdisc *q)
{
	return (q->flags & TCQ_F_BUILTIN) ? true : false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the global
	 * qdisc hashtable, we don't want to hit it again
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;

done:
	ret = -1;
	goto out;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();
	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
				       true) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}
/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
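	/* Worked example (illustrative values, not from this file):
	 * "tc class add dev eth0 parent 1: classid 1:10" arrives with
	 * tcm_parent = 0x00010000 (1:0) and tcm_handle = 0x0001000a
	 * (1:10); both majors agree, so qid resolves to 0x00010000
	 * in the steps below.
	 */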
	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl,
					      RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}
*q
, struct sk_buff
*skb
,
1761 struct tcmsg
*tcm
, struct netlink_callback
*cb
,
1764 struct qdisc_dump_args arg
;
1766 if (tc_qdisc_dump_ignore(q
) ||
1767 *t_p
< s_t
|| !q
->ops
->cl_ops
||
1769 TC_H_MAJ(tcm
->tcm_parent
) != q
->handle
)) {
1774 memset(&cb
->args
[1], 0, sizeof(cb
->args
)-sizeof(cb
->args
[0]));
1775 arg
.w
.fn
= qdisc_class_dump
;
1779 arg
.w
.skip
= cb
->args
[1];
1781 q
->ops
->cl_ops
->walk(q
, &arg
.w
);
1782 cb
->args
[1] = arg
.w
.count
;
1789 static int tc_dump_tclass_root(struct Qdisc
*root
, struct sk_buff
*skb
,
1790 struct tcmsg
*tcm
, struct netlink_callback
*cb
,
1799 if (tc_dump_tclass_qdisc(root
, skb
, tcm
, cb
, t_p
, s_t
) < 0)
1802 if (!qdisc_dev(root
))
1805 hash_for_each(qdisc_dev(root
)->qdisc_hash
, b
, q
, hash
) {
1806 if (tc_dump_tclass_qdisc(q
, skb
, tcm
, cb
, t_p
, s_t
) < 0)
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		struct tcf_result *res, bool compat_mode)
{
#ifdef CONFIG_NET_CLS_ACT
	const struct tcf_proto *old_tp = tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = tc_skb_protocol(skb);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
			goto reset;
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= MAX_REC_LOOP)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = old_tp;
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tc_classify);
bool tcf_destroy(struct tcf_proto *tp, bool force)
{
	if (tp->ops->destroy(tp, force)) {
		module_put(tp->ops->owner);
		kfree_rcu(tp, rcu);
		return true;
	}

	return false;
}

void tcf_destroy_chain(struct tcf_proto __rcu **fl)
{
	struct tcf_proto *tp;

	while ((tp = rtnl_dereference(*fl)) != NULL) {
		RCU_INIT_POINTER(*fl, tp->next);
		tcf_destroy(tp, true);
	}
}
EXPORT_SYMBOL(tcf_destroy_chain);
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};
static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
		      NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
		      NULL);

	return 0;
}

subsys_initcall(pktsched_init);