#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct qdisc_rate_table {
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};
enum qdisc_state_t {
	__QDISC_STATE_RUNNING,
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};
struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};
struct Qdisc {
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_THROTTLED		2
#define TCQ_F_INGRESS		4
#define TCQ_F_CAN_BYPASS	8
#define TCQ_F_MQROOT		16
#define TCQ_F_WARN_NONWC	(1 << 16)
	int			padded;
	struct Qdisc_ops	*ops;
	struct qdisc_size_table	*stab;
	struct list_head	list;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct gnet_stats_rate_est	rate_est;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;

	struct sk_buff		*gso_skb;
	/*
	 * For performance's sake on SMP, we put the most heavily modified
	 * fields at the end.
	 */
	unsigned long		state;
	struct sk_buff_head	q;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue	qstats;
};
struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};
struct Qdisc_ops {
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);
	void			(*attach)(struct Qdisc *);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};
struct tcf_result {
	unsigned long	class;
	u32		classid;
};
struct tcf_proto_ops {
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto*);

	unsigned long		(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto*, unsigned long);
	int			(*change)(struct tcf_proto*, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto*, unsigned long);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto*, unsigned long,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};
struct tcf_proto {
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};
struct qdisc_skb_cb {
	unsigned int		pkt_len;
	char			data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}
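
/*
 * Individual qdiscs can keep per-packet state of their own in the data[]
 * tail of qdisc_skb_cb, which lives inside skb->cb.  A minimal sketch of
 * that pattern follows; the example_skb_cb struct and accessor are purely
 * illustrative, not part of this header, and the private struct must fit
 * in the space left in skb->cb after struct qdisc_skb_cb.
 */
struct example_skb_cb {
	u64		time_to_send;	/* hypothetical per-packet field */
};

static inline struct example_skb_cb *example_skb_cb(struct sk_buff *skb)
{
	return (struct example_skb_cb *)qdisc_skb_cb(skb)->data;
}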
static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)
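
/*
 * Typical use of the tree lock, as a minimal sketch: a qdisc's ->change()
 * path (which runs under RTNL) takes sch_tree_lock() while it swaps in new
 * parameters, so the datapath never sees a half-updated configuration.
 * The example_sched_data struct and example_change_limit() function are
 * hypothetical, not part of this header.
 */
struct example_sched_data {
	u32	limit;		/* hypothetical per-qdisc parameter */
};

static inline void example_change_limit(struct Qdisc *sch,
					struct example_sched_data *q,
					u32 new_limit)
{
	/* caller holds RTNL; the tree lock fences out the packet paths */
	sch_tree_lock(sch);
	q->limit = new_limit;
	sch_tree_unlock(sch);
}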
extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash {
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
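
/*
 * Classful qdiscs embed Qdisc_class_common in their private class structure
 * (keyed by classid) and resolve a lookup back to their own type with
 * container_of().  A minimal sketch of that pattern follows; the
 * example_class struct and example_class_find() helper are hypothetical
 * and not part of this header.
 */
struct example_class {
	struct Qdisc_class_common common;	/* hashed by classid */
	u32 quantum;				/* hypothetical per-class parameter */
};

static inline struct example_class *
example_class_find(struct Qdisc_class_hash *hash, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(hash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct example_class, common);
}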
extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				    struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);
/* Reset all TX qdiscs of a device.  */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}
/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
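
/*
 * A minimal sketch of how a parent qdisc interprets a child's enqueue
 * return code: __NET_XMIT_STOLEN means the packet was consumed by the
 * child (e.g. by an action) rather than dropped, so the parent should not
 * account a drop for it.  example_count_child_drop() is illustrative only
 * and not part of this header.
 */
static inline void example_count_child_drop(struct Qdisc *sch, int ret)
{
	if (ret != NET_XMIT_SUCCESS && net_xmit_drop_count(ret))
		sch->qstats.drops++;
}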
static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}

static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
{
	sch->bstats.bytes += len;
	sch->bstats.packets++;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
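
/*
 * A minimal sketch of how a non-work-conserving qdisc (a shaper such as
 * TBF) would pair the two helpers above: its ->peek() uses
 * qdisc_peek_dequeued() to look at the next packet without losing it, and
 * its ->dequeue() later releases that same packet with
 * qdisc_dequeue_peeked() once it is actually allowed to be sent.
 * example_shaper_dequeue() and its may_send parameter (standing in for a
 * token/timer check) are hypothetical, not part of this header.
 */
static inline struct sk_buff *example_shaper_dequeue(struct Qdisc *sch,
						     bool may_send)
{
	struct sk_buff *skb = qdisc_peek_dequeued(sch);

	if (skb == NULL || !may_send)
		return NULL;		/* nothing eligible to send yet */

	return qdisc_dequeue_peeked(sch);
}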
static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list;
	 * it is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]);
	return rtab->data[slot];
}
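
/*
 * A minimal sketch of how a rate-limiting qdisc consumes the L2T lookup:
 * the table maps a packet length to the (scaled) time the configured rate
 * needs to transmit it, and that time is charged against the qdisc's
 * accumulated tokens.  example_charge_tokens() and its parameters are
 * illustrative, not part of this header.
 */
static inline long example_charge_tokens(struct qdisc_rate_table *rtab,
					 long tokens, unsigned int pktlen)
{
	return tokens - (long)qdisc_l2t(rtab, pktlen);
}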
#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif