include/net/sch_generic.h

#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/config.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
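
/* Rate table: converts a tc_ratespec into a 256-entry lookup of
 * packet size -> transmission time, as used by rate-limiting qdiscs.
 * Tables are shared and refcounted. */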
struct qdisc_rate_table
{
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};
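
/* One instance of a queueing discipline, attached to a net_device or
 * nested inside a classful qdisc.  enqueue/dequeue mirror the
 * Qdisc_ops hooks so the fast path avoids going through ops. */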
struct Qdisc
{
	int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff * (*dequeue)(struct Qdisc *dev);
	unsigned flags;
#define TCQ_F_BUILTIN	1
#define TCQ_F_THROTTLED	2
#define TCQ_F_INGRESS	4
	int padded;
	struct Qdisc_ops *ops;
	u32 handle;
	u32 parent;
	atomic_t refcnt;
	struct sk_buff_head q;
	struct net_device *dev;
	struct list_head list;

	struct gnet_stats_basic bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	spinlock_t *stats_lock;
	struct rcu_head q_rcu;
	int (*reshape_fail)(struct sk_buff *skb,
			    struct Qdisc *q);

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc *__parent;
};
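
/* Operations on the classes of a classful qdisc: grafting child
 * qdiscs, creating/changing/deleting classes, binding filters, and
 * dumping class configuration and statistics over rtnetlink. */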
struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int (*graft)(struct Qdisc *, unsigned long cl,
		     struct Qdisc *, struct Qdisc **);
	struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);

	/* Class manipulation routines */
	unsigned long (*get)(struct Qdisc *, u32 classid);
	void (*put)(struct Qdisc *, unsigned long);
	int (*change)(struct Qdisc *, u32, u32,
		      struct rtattr **, unsigned long *);
	int (*delete)(struct Qdisc *, unsigned long);
	void (*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
				  u32 classid);
	void (*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int (*dump)(struct Qdisc *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);
	int (*dump_stats)(struct Qdisc *, unsigned long,
			  struct gnet_dump *);
};
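
/* A qdisc type.  'id' is the name userspace selects the qdisc by,
 * and 'priv_size' is the size of the private area allocated behind
 * struct Qdisc by qdisc_alloc(). */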
struct Qdisc_ops
{
	struct Qdisc_ops *next;
	struct Qdisc_class_ops *cl_ops;
	char id[IFNAMSIZ];
	int priv_size;

	int (*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff * (*dequeue)(struct Qdisc *);
	int (*requeue)(struct sk_buff *, struct Qdisc *);
	unsigned int (*drop)(struct Qdisc *);

	int (*init)(struct Qdisc *, struct rtattr *arg);
	void (*reset)(struct Qdisc *);
	void (*destroy)(struct Qdisc *);
	int (*change)(struct Qdisc *, struct rtattr *arg);

	int (*dump)(struct Qdisc *, struct sk_buff *);
	int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module *owner;
};
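
/*
 * Illustrative sketch (not part of the original header): a minimal
 * classless FIFO could be built almost entirely from the inline
 * helpers declared further down and registered with register_qdisc()
 * from <net/pkt_sched.h>.  The "my_fifo" names are hypothetical.
 *
 *	static struct Qdisc_ops my_fifo_qdisc_ops = {
 *		.id		= "my_fifo",
 *		.priv_size	= 0,
 *		.enqueue	= qdisc_enqueue_tail,
 *		.dequeue	= qdisc_dequeue_head,
 *		.requeue	= qdisc_requeue,
 *		.drop		= qdisc_queue_drop,
 *		.reset		= qdisc_reset_queue,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	register_qdisc(&my_fifo_qdisc_ops);
 */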

struct tcf_result
{
	unsigned long class;
	u32 classid;
};
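
/* A classifier (filter) type such as u32 or fw.  classify() maps a
 * packet to a tcf_result; the remaining hooks manage individual
 * filter elements and their rtnetlink dumps. */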
struct tcf_proto_ops
{
	struct tcf_proto_ops *next;
	char kind[IFNAMSIZ];

	int (*classify)(struct sk_buff *, struct tcf_proto *,
			struct tcf_result *);
	int (*init)(struct tcf_proto *);
	void (*destroy)(struct tcf_proto *);

	unsigned long (*get)(struct tcf_proto *, u32 handle);
	void (*put)(struct tcf_proto *, unsigned long);
	int (*change)(struct tcf_proto *, unsigned long,
		      u32 handle, struct rtattr **,
		      unsigned long *);
	int (*delete)(struct tcf_proto *, unsigned long);
	void (*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int (*dump)(struct tcf_proto *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);

	struct module *owner;
};
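
/* An instantiated filter, chained by priority on its qdisc.  The
 * members needed on every packet come first ("fast access part"). */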
struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto *next;
	void *root;
	int (*classify)(struct sk_buff *, struct tcf_proto *,
			struct tcf_result *);
	u32 protocol;

	/* All the rest */
	u32 prio;
	u32 classid;
	struct Qdisc *q;
	void *data;
	struct tcf_proto_ops *ops;
};
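
/* Changes to a device's whole qdisc tree are serialized through
 * qdisc_lock_tree()/qdisc_unlock_tree(); the macros below derive the
 * device from a qdisc or from a filter. */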
extern void qdisc_lock_tree(struct net_device *dev);
extern void qdisc_unlock_tree(struct net_device *dev);

#define sch_tree_lock(q)	qdisc_lock_tree((q)->dev)
#define sch_tree_unlock(q)	qdisc_unlock_tree((q)->dev)
#define tcf_tree_lock(tp)	qdisc_lock_tree((tp)->q->dev)
#define tcf_tree_unlock(tp)	qdisc_unlock_tree((tp)->q->dev)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct Qdisc_ops *ops);
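
/* Destroy a filter: release its type-specific state, drop the module
 * reference pinning the classifier, then free the tcf_proto. */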
static inline void
tcf_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree(tp);
}
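
/* The inline helpers below implement common queue operations on an
 * sk_buff_head while keeping bstats/qstats consistent.  The
 * __-prefixed variants take an explicit list; the plain variants
 * operate on the qdisc's built-in queue sch->q. */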
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += skb->len;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_requeue(skb, sch, &sch->q);
}

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/* We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it */
	skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = skb->len;
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}
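
/* Called when a packet cannot be enqueued because a queue limit was
 * hit.  With CONFIG_NET_CLS_POLICE, a CBQ parent may take the packet
 * back via its reshape_fail() hook instead of it being dropped. */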
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_POLICE
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

#endif