/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mq_sched {
	struct Qdisc		**qdiscs;
};
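
/* Propagate an mq create/destroy command to the device via ndo_setup_tc()
 * so drivers that offload mq can mirror the software qdisc state in
 * hardware.
 */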
static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mq_qopt_offload opt = {
		.command = cmd,
		.handle = sch->handle,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

static void mq_offload_stats(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mq_qopt_offload opt = {
		.command = TC_MQ_STATS,
		.handle = sch->handle,
		.stats = {
			.bstats = &sch->bstats,
			.qstats = &sch->qstats,
		},
	};

	if (tc_can_offload(dev) && dev->netdev_ops->ndo_setup_tc)
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}

static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	mq_offload(sch, TC_MQ_DESTROY);

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_put(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}
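
/* mq does no queueing itself: init only pre-allocates one default child
 * qdisc per TX queue, giving child ntx the class minor id ntx + 1. The
 * children are grafted later in mq_attach(), which cannot fail precisely
 * because all allocation happens here.
 */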
static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;

	mq_offload(sch, TC_MQ_CREATE);
	return 0;
}
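
/* Graft the pre-allocated children onto their TX queues. Clearing
 * priv->qdiscs afterwards tells mq_destroy() that ownership of the
 * children has passed to the device queues.
 */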
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
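
/* Aggregate the children's statistics into the root's counters for a
 * netlink dump; see the comment inside on mixing locked and lockless
 * children.
 */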
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;
	__u32 qlen = 0;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			qlen = qdisc_qlen_sum(qdisc);
			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
		} else {
			sch->q.qlen		+= qdisc->q.qlen;
			sch->bstats.bytes	+= qdisc->bstats.bytes;
			sch->bstats.packets	+= qdisc->bstats.packets;
			sch->qstats.qlen	+= qdisc->qstats.qlen;
			sch->qstats.backlog	+= qdisc->qstats.backlog;
			sch->qstats.drops	+= qdisc->qstats.drops;
			sch->qstats.requeues	+= qdisc->qstats.requeues;
			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}
	mq_offload_stats(sch);

	return 0;
}
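
/* Class minor id cl maps to TX queue index cl - 1. Minor 0 is never a
 * valid class: the unsigned subtraction wraps and fails the range check.
 */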
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
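
/* Replace the child qdisc on a single TX queue. The device is quiesced
 * around the swap so the transmit path never sees a half-updated queue.
 */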
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
		return -1;
	return 0;
}

static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};
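
/*
 * Typical usage from userspace (illustrative; the interface name is an
 * example): mq is the default root qdisc on multiqueue devices, and each
 * TX queue shows up as class :1 .. :N that can carry its own child qdisc:
 *
 *	tc qdisc add dev eth0 root handle 1: mq
 *	tc qdisc replace dev eth0 parent 1:1 fq_codel
 */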