linux-2.6/verdex.git: include/net/sch_generic.h
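/*
 * Generic packet scheduler interfaces: the Qdisc (queueing discipline)
 * and tcf_proto (traffic classifier) abstractions shared by the
 * scheduler core, individual qdiscs and classifiers.
 */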
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/config.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table
{
	struct tc_ratespec	rate;
	u32			data[256];
	struct qdisc_rate_table	*next;
	int			refcnt;
};

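/*
 * One instance of a queueing discipline, attached to a device or to a
 * class of a classful qdisc.  enqueue()/dequeue() mirror the hooks of
 * the same name in Qdisc_ops; bstats/qstats feed the gen_stats dumps.
 */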
struct Qdisc
{
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN	1
#define TCQ_F_THROTTLED	2
#define TCQ_F_INGRESS	4
	int			padded;
	struct Qdisc_ops	*ops;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct sk_buff_head	q;
	struct net_device	*dev;
	struct list_head	list;

	struct gnet_stats_basic	bstats;
	struct gnet_stats_queue	qstats;
	struct gnet_stats_rate_est	rate_est;
	spinlock_t		*stats_lock;
	struct rcu_head		q_rcu;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until better solution will be invented.
	 */
	struct Qdisc		*__parent;
};

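/*
 * Operations a classful qdisc provides for manipulating its classes:
 * grafting child qdiscs, getting/changing/deleting classes, attaching
 * filter chains and dumping class configuration over rtnetlink.
 */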
struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct rtattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg*);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

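/*
 * The interface a queueing discipline implements and registers with the
 * scheduler core (see register_qdisc()).  'id' is the name used from tc,
 * and priv_size is the size of the per-instance private data allocated
 * behind struct Qdisc.
 */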
struct Qdisc_ops
{
	struct Qdisc_ops	*next;
	struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	int			(*requeue)(struct sk_buff *, struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct rtattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct rtattr *arg);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

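/*
 * Classifier (filter) side: a tcf_proto is one filter instance hung off
 * a qdisc; its classify() fills in a tcf_result identifying the target
 * class.  tcf_proto_ops is the interface a classifier implements.
 */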
struct tcf_result
{
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops
{
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	int			(*init)(struct tcf_proto*);
	void			(*destroy)(struct tcf_proto*);

	unsigned long		(*get)(struct tcf_proto*, u32 handle);
	void			(*put)(struct tcf_proto*, unsigned long);
	int			(*change)(struct tcf_proto*, unsigned long,
					u32 handle, struct rtattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto*, unsigned long);
	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto*, unsigned long,
					struct sk_buff *skb, struct tcmsg*);

	struct module		*owner;
};

struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff*, struct tcf_proto*,
					struct tcf_result *);
	u32			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

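/*
 * Whole-tree locking used while the qdisc/class/filter configuration of
 * a device is being changed.  The macros resolve to the device behind
 * the qdisc or filter at hand.
 */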
extern void qdisc_lock_tree(struct net_device *dev);
extern void qdisc_unlock_tree(struct net_device *dev);

#define sch_tree_lock(q)	qdisc_lock_tree((q)->dev)
#define sch_tree_unlock(q)	qdisc_unlock_tree((q)->dev)
#define tcf_tree_lock(tp)	qdisc_lock_tree((tp)->q->dev)
#define tcf_tree_unlock(tp)	qdisc_unlock_tree((tp)->q->dev)

static inline void
tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}

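/*
 * The inline helpers below cover the common case of a qdisc backed by a
 * plain sk_buff_head: the __qdisc_* variants operate on a caller-supplied
 * list, the plain variants on the built-in sch->q, and they keep the
 * bstats/qstats counters in sync.
 *
 * Illustrative sketch only (example_enqueue and example_limit are made-up
 * names, not part of this header): a simple FIFO-style enqueue hook could
 * be built on these helpers as
 *
 *	static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		if (skb_queue_len(&sch->q) < example_limit)
 *			return qdisc_enqueue_tail(skb, sch);
 *
 *		return qdisc_drop(skb, sch);
 *	}
 */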
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += skb->len;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_requeue(skb, sch, &sch->q);
}

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = skb->len;
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

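/*
 * Called when a packet could not be (re)queued into an inner qdisc.
 * With CONFIG_NET_CLS_POLICE the parent's reshape_fail() hook gets a
 * chance to take the packet back (returning 0); otherwise the packet
 * is freed and a drop is counted.
 */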
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_POLICE
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

#endif