include/linux/pkt_sched.h
#ifndef __LINUX_PKT_SCHED_H
#define __LINUX_PKT_SCHED_H

#include <linux/types.h>
/* Logical priority bands not depending on specific packet scheduler.
   Every scheduler will map them to real traffic classes, if it has
   no more precise mechanism to classify packets.

   These numbers have no special meaning, though their coincidence
   with obsolete IPv6 values is not occasional :-). New IPv6 drafts
   preferred full anarchy inspired by diffserv group.

   Note: TC_PRIO_BESTEFFORT does not mean that it is the most unhappy
   class, actually, as a rule it will be handled with more care than
   filler or even bulk.
 */

#define TC_PRIO_BESTEFFORT		0
#define TC_PRIO_FILLER			1
#define TC_PRIO_BULK			2
#define TC_PRIO_INTERACTIVE_BULK	4
#define TC_PRIO_INTERACTIVE		6
#define TC_PRIO_CONTROL			7

#define TC_PRIO_MAX			15
/* Generic queue statistics, available for all the elements.
   Particular schedulers may also have their private records.
 */

struct tc_stats
{
	__u64	bytes;			/* Number of enqueued bytes */
	__u32	packets;		/* Number of enqueued packets */
	__u32	drops;			/* Packets dropped because of lack of resources */
	__u32	overlimits;		/* Number of throttle events when this
					 * flow goes out of allocated bandwidth */
	__u32	bps;			/* Current flow byte rate */
	__u32	pps;			/* Current flow packet rate */
	__u32	qlen;
	__u32	backlog;
};
struct tc_estimator
{
	signed char	interval;
	unsigned char	ewma_log;
};
49 /* "Handles"
50 ---------
52 All the traffic control objects have 32bit identifiers, or "handles".
54 They can be considered as opaque numbers from user API viewpoint,
55 but actually they always consist of two fields: major and
56 minor numbers, which are interpreted by kernel specially,
57 that may be used by applications, though not recommended.
59 F.e. qdisc handles always have minor number equal to zero,
60 classes (or flows) have major equal to parent qdisc major, and
61 minor uniquely identifying class inside qdisc.
63 Macros to manipulate handles:
66 #define TC_H_MAJ_MASK (0xFFFF0000U)
67 #define TC_H_MIN_MASK (0x0000FFFFU)
68 #define TC_H_MAJ(h) ((h)&TC_H_MAJ_MASK)
69 #define TC_H_MIN(h) ((h)&TC_H_MIN_MASK)
70 #define TC_H_MAKE(maj,min) (((maj)&TC_H_MAJ_MASK)|((min)&TC_H_MIN_MASK))
72 #define TC_H_UNSPEC (0U)
73 #define TC_H_ROOT (0xFFFFFFFFU)
74 #define TC_H_INGRESS (0xFFFFFFF1U)
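
/*
 * Illustrative sketch, not part of this header's ABI: composing and
 * splitting a handle with the macros above. The tc(8) "major:minor"
 * notation maps major to the upper 16 bits and minor to the lower 16.
 * The function name is hypothetical and only serves as an example.
 */
static inline void tc_handle_example(void)
{
	__u32 qdisc  = TC_H_MAKE(0x10000U, 0);	/* "1:"   - qdisc, minor is zero */
	__u32 class_ = TC_H_MAKE(qdisc, 0x10);	/* "1:10" - class under that qdisc */

	/* TC_H_MAJ()/TC_H_MIN() recover the two halves */
	(void)TC_H_MAJ(class_);			/* 0x10000 */
	(void)TC_H_MIN(class_);			/* 0x10 */
}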
struct tc_ratespec
{
	unsigned char	cell_log;
	unsigned char	__reserved;
	unsigned short	overhead;
	short		cell_align;
	unsigned short	mpu;
	__u32		rate;
};

#define TC_RTAB_SIZE	1024
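
/*
 * Illustrative sketch, not part of the ABI: user space typically passes a
 * TC_RTAB_SIZE-byte table of 256 __u32 transmit times alongside a
 * tc_ratespec; entry i covers packet sizes up to ((i + 1) << cell_log)
 * bytes. The time unit (microseconds here) and the helper name are
 * assumptions made only for this example.
 */
static inline void fill_rtab_example(const struct tc_ratespec *r, __u32 *rtab)
{
	unsigned int i;

	for (i = 0; i < TC_RTAB_SIZE / sizeof(__u32); i++) {
		unsigned int sz = (i + 1) << r->cell_log;

		if (sz < r->mpu)
			sz = r->mpu;		/* never account below the minimum packet unit */
		rtab[i] = (__u32)(1000000ULL * sz / r->rate);	/* usec to send sz bytes at r->rate B/s */
	}
}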
struct tc_sizespec {
	unsigned char	cell_log;
	unsigned char	size_log;
	short		cell_align;
	int		overhead;
	unsigned int	linklayer;
	unsigned int	mpu;
	unsigned int	mtu;
	unsigned int	tsize;
};

enum {
	TCA_STAB_UNSPEC,
	TCA_STAB_BASE,
	TCA_STAB_DATA,
	__TCA_STAB_MAX
};

#define TCA_STAB_MAX (__TCA_STAB_MAX - 1)
/* FIFO section */

struct tc_fifo_qopt
{
	__u32	limit;	/* Queue length: bytes for bfifo, packets for pfifo */
};
/* PRIO section */

#define TCQ_PRIO_BANDS		16
#define TCQ_MIN_PRIO_BANDS	2

struct tc_prio_qopt
{
	int	bands;			/* Number of bands */
	__u8	priomap[TC_PRIO_MAX+1];	/* Map: logical priority -> PRIO band */
};
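
/*
 * Illustrative sketch, not part of the ABI: filling a tc_prio_qopt so that
 * the logical TC_PRIO_* values above land in one of three bands, band 0
 * being served first. The concrete mapping is only an example of the idea,
 * not a default mandated by this header; the helper name is hypothetical.
 */
static inline void prio_qopt_example(struct tc_prio_qopt *q)
{
	int i;

	q->bands = 3;
	for (i = 0; i <= TC_PRIO_MAX; i++)
		q->priomap[i] = 1;			/* middle band by default */
	q->priomap[TC_PRIO_INTERACTIVE] = 0;		/* latency-sensitive traffic first */
	q->priomap[TC_PRIO_CONTROL]     = 0;
	q->priomap[TC_PRIO_BULK]        = 2;		/* bulk and filler last */
	q->priomap[TC_PRIO_FILLER]      = 2;
}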
/* TBF section */

struct tc_tbf_qopt
{
	struct tc_ratespec rate;
	struct tc_ratespec peakrate;
	__u32		limit;
	__u32		buffer;
	__u32		mtu;
};
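
/*
 * Illustrative sketch, not part of the ABI: a common user-space convention
 * (an assumption here, not something this header defines) is that 'buffer'
 * carries the time needed to send one burst at 'rate', in the same time
 * unit used for the rate tables, while 'limit' bounds the backlog in bytes.
 * The helper name and the microsecond unit are assumptions.
 */
static inline void tbf_qopt_example(struct tc_tbf_qopt *opt,
				    __u32 rate_Bps, __u32 burst_bytes,
				    __u32 queue_bytes)
{
	opt->rate.rate = rate_Bps;				/* bytes per second */
	opt->limit     = queue_bytes;				/* allowed backlog in bytes */
	opt->buffer    = (__u32)(1000000ULL * burst_bytes / rate_Bps);	/* usec to drain one burst */
	opt->mtu       = 0;					/* no peakrate limiting in this sketch */
}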
enum
{
	TCA_TBF_UNSPEC,
	TCA_TBF_PARMS,
	TCA_TBF_RTAB,
	TCA_TBF_PTAB,
	__TCA_TBF_MAX,
};

#define TCA_TBF_MAX (__TCA_TBF_MAX - 1)
/* TEQL section */

/* TEQL does not require any parameters */

/* SFQ section */

struct tc_sfq_qopt
{
	unsigned	quantum;	/* Bytes per round allocated to flow */
	int		perturb_period;	/* Period of hash perturbation */
	__u32		limit;		/* Maximal packets in queue */
	unsigned	divisor;	/* Hash divisor */
	unsigned	flows;		/* Maximal number of flows */
};

struct tc_sfq_xstats
{
	__s32		allot;
};

/*
 *  NOTE: limit, divisor and flows are hardwired to code at the moment.
 *
 *	limit=flows=128, divisor=1024;
 *
 *	The only reason for this is efficiency; it is possible
 *	to change these parameters at compile time.
 */
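
/*
 * Illustrative sketch, not part of the ABI: with limit, divisor and flows
 * hardwired as noted above, user space typically only chooses 'quantum'
 * (often the link MTU) and 'perturb_period'. The values below are examples
 * only, and the helper name is hypothetical.
 */
static inline void sfq_qopt_example(struct tc_sfq_qopt *q)
{
	q->quantum        = 1514;	/* roughly one full-size Ethernet frame per round */
	q->perturb_period = 10;		/* re-key the flow hash every 10 seconds */
	q->limit = q->divisor = q->flows = 0;	/* left to the hardwired defaults */
}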
/* RED section */

enum
{
	TCA_RED_UNSPEC,
	TCA_RED_PARMS,
	TCA_RED_STAB,
	__TCA_RED_MAX,
};

#define TCA_RED_MAX (__TCA_RED_MAX - 1)

struct tc_red_qopt
{
	__u32		limit;		/* HARD maximal queue length (bytes) */
	__u32		qth_min;	/* Min average length threshold (bytes) */
	__u32		qth_max;	/* Max average length threshold (bytes) */
	unsigned char	Wlog;		/* log(W) */
	unsigned char	Plog;		/* log(P_max/(qth_max-qth_min)) */
	unsigned char	Scell_log;	/* cell size for idle damping */
	unsigned char	flags;
#define TC_RED_ECN	1
#define TC_RED_HARDDROP	2
};
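
/*
 * Illustrative sketch of the classic RED behaviour the fields above
 * parameterise (not kernel code): the average queue length is an EWMA with
 * weight W = 2^-Wlog, and the marking probability grows linearly from 0 at
 * qth_min to a maximum at qth_max. The floating-point form and the helper
 * name are assumptions; the real implementation uses fixed-point arithmetic.
 */
static inline double red_mark_prob_example(const struct tc_red_qopt *q,
					   double avg, double p_max)
{
	if (avg < q->qth_min)
		return 0.0;			/* below min threshold: never mark */
	if (avg >= q->qth_max)
		return 1.0;			/* above max threshold: always drop */
	return p_max * (avg - q->qth_min) / (q->qth_max - q->qth_min);
}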
struct tc_red_xstats
{
	__u32		early;		/* Early drops */
	__u32		pdrop;		/* Drops due to queue limits */
	__u32		other;		/* Drops due to drop() calls */
	__u32		marked;		/* Marked packets */
};
/* GRED section */

#define MAX_DPs		16

enum
{
	TCA_GRED_UNSPEC,
	TCA_GRED_PARMS,
	TCA_GRED_STAB,
	TCA_GRED_DPS,
	__TCA_GRED_MAX,
};

#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)

struct tc_gred_qopt
{
	__u32		limit;		/* HARD maximal queue length (bytes) */
	__u32		qth_min;	/* Min average length threshold (bytes) */
	__u32		qth_max;	/* Max average length threshold (bytes) */
	__u32		DP;		/* up to 2^32 DPs */
	__u32		backlog;
	__u32		qave;
	__u32		forced;
	__u32		early;
	__u32		other;
	__u32		pdrop;
	__u8		Wlog;		/* log(W) */
	__u8		Plog;		/* log(P_max/(qth_max-qth_min)) */
	__u8		Scell_log;	/* cell size for idle damping */
	__u8		prio;		/* prio of this VQ */
	__u32		packets;
	__u32		bytesin;
};

/* gred setup */
struct tc_gred_sopt
{
	__u32		DPs;
	__u32		def_DP;
	__u8		grio;
	__u8		flags;
	__u16		pad1;
};
/* HTB section */
#define TC_HTB_NUMPRIO		8
#define TC_HTB_MAXDEPTH		8
#define TC_HTB_PROTOVER		3	/* the same as HTB and TC's major */

struct tc_htb_opt
{
	struct tc_ratespec	rate;
	struct tc_ratespec	ceil;
	__u32	buffer;
	__u32	cbuffer;
	__u32	quantum;
	__u32	level;		/* out only */
	__u32	prio;
};

struct tc_htb_glob
{
	__u32 version;		/* to match HTB/TC */
	__u32 rate2quantum;	/* bps->quantum divisor */
	__u32 defcls;		/* default class number */
	__u32 debug;		/* debug flags */

	/* stats */
	__u32 direct_pkts;	/* count of non-shaped packets */
};
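
/*
 * Illustrative sketch, not part of the ABI: when a class does not carry its
 * own quantum, the per-round quantum is commonly derived from the class rate
 * divided by tc_htb_glob.rate2quantum (what tc calls "r2q"). The fallback of
 * 10 is an assumption taken from common tc usage; the helper is hypothetical.
 */
static inline __u32 htb_quantum_example(const struct tc_htb_glob *g,
					const struct tc_htb_opt *cl)
{
	__u32 r2q = g->rate2quantum ? g->rate2quantum : 10;

	return cl->rate.rate / r2q;	/* bytes this class may send per round */
}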
enum
{
	TCA_HTB_UNSPEC,
	TCA_HTB_PARMS,
	TCA_HTB_INIT,
	TCA_HTB_CTAB,
	TCA_HTB_RTAB,
	__TCA_HTB_MAX,
};

#define TCA_HTB_MAX (__TCA_HTB_MAX - 1)

struct tc_htb_xstats
{
	__u32 lends;
	__u32 borrows;
	__u32 giants;	/* too big packets (rate will not be accurate) */
	__u32 tokens;
	__u32 ctokens;
};
/* HFSC section */

struct tc_hfsc_qopt
{
	__u16	defcls;		/* default class */
};

struct tc_service_curve
{
	__u32	m1;		/* slope of the first segment in bps */
	__u32	d;		/* x-projection of the first segment in us */
	__u32	m2;		/* slope of the second segment in bps */
};
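
/*
 * Illustrative sketch, not part of the ABI: a two-segment service curve as
 * described by the fields above has slope m1 for the first d microseconds
 * and slope m2 afterwards. Units follow the struct comments; the helper
 * name and the per-second scaling are assumptions for the example.
 */
static inline __u64 hfsc_sc_example(const struct tc_service_curve *sc, __u64 t_us)
{
	/* service accumulated after t_us microseconds, in whatever unit
	 * m1/m2 are expressed per second */
	if (t_us <= sc->d)
		return (__u64)sc->m1 * t_us / 1000000;
	return ((__u64)sc->m1 * sc->d + (__u64)sc->m2 * (t_us - sc->d)) / 1000000;
}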
struct tc_hfsc_stats
{
	__u64	work;		/* total work done */
	__u64	rtwork;		/* work done by real-time criteria */
	__u32	period;		/* current period */
	__u32	level;		/* class level in hierarchy */
};

enum
{
	TCA_HFSC_UNSPEC,
	TCA_HFSC_RSC,
	TCA_HFSC_FSC,
	TCA_HFSC_USC,
	__TCA_HFSC_MAX,
};

#define TCA_HFSC_MAX (__TCA_HFSC_MAX - 1)
/* CBQ section */

#define TC_CBQ_MAXPRIO		8
#define TC_CBQ_MAXLEVEL		8
#define TC_CBQ_DEF_EWMA		5

struct tc_cbq_lssopt
{
	unsigned char	change;
	unsigned char	flags;
#define TCF_CBQ_LSS_BOUNDED	1
#define TCF_CBQ_LSS_ISOLATED	2
	unsigned char	ewma_log;
	unsigned char	level;
#define TCF_CBQ_LSS_FLAGS	1
#define TCF_CBQ_LSS_EWMA	2
#define TCF_CBQ_LSS_MAXIDLE	4
#define TCF_CBQ_LSS_MINIDLE	8
#define TCF_CBQ_LSS_OFFTIME	0x10
#define TCF_CBQ_LSS_AVPKT	0x20
	__u32		maxidle;
	__u32		minidle;
	__u32		offtime;
	__u32		avpkt;
};

struct tc_cbq_wrropt
{
	unsigned char	flags;
	unsigned char	priority;
	unsigned char	cpriority;
	unsigned char	__reserved;
	__u32		allot;
	__u32		weight;
};

struct tc_cbq_ovl
{
	unsigned char	strategy;
#define	TC_CBQ_OVL_CLASSIC	0
#define	TC_CBQ_OVL_DELAY	1
#define	TC_CBQ_OVL_LOWPRIO	2
#define	TC_CBQ_OVL_DROP		3
#define	TC_CBQ_OVL_RCLASSIC	4
	unsigned char	priority2;
	__u16		pad;
	__u32		penalty;
};

struct tc_cbq_police
{
	unsigned char	police;
	unsigned char	__res1;
	unsigned short	__res2;
};

struct tc_cbq_fopt
{
	__u32		split;
	__u32		defmap;
	__u32		defchange;
};

struct tc_cbq_xstats
{
	__u32		borrows;
	__u32		overactions;
	__s32		avgidle;
	__s32		undertime;
};

enum
{
	TCA_CBQ_UNSPEC,
	TCA_CBQ_LSSOPT,
	TCA_CBQ_WRROPT,
	TCA_CBQ_FOPT,
	TCA_CBQ_OVL_STRATEGY,
	TCA_CBQ_RATE,
	TCA_CBQ_RTAB,
	TCA_CBQ_POLICE,
	__TCA_CBQ_MAX,
};

#define TCA_CBQ_MAX	(__TCA_CBQ_MAX - 1)
/* dsmark section */

enum {
	TCA_DSMARK_UNSPEC,
	TCA_DSMARK_INDICES,
	TCA_DSMARK_DEFAULT_INDEX,
	TCA_DSMARK_SET_TC_INDEX,
	TCA_DSMARK_MASK,
	TCA_DSMARK_VALUE,
	__TCA_DSMARK_MAX,
};

#define TCA_DSMARK_MAX (__TCA_DSMARK_MAX - 1)

/* ATM section */

enum {
	TCA_ATM_UNSPEC,
	TCA_ATM_FD,		/* file/socket descriptor */
	TCA_ATM_PTR,		/* pointer to descriptor - later */
	TCA_ATM_HDR,		/* LL header */
	TCA_ATM_EXCESS,		/* excess traffic class (0 for CLP) */
	TCA_ATM_ADDR,		/* PVC address (for output only) */
	TCA_ATM_STATE,		/* VC state (ATM_VS_*; for output only) */
	__TCA_ATM_MAX,
};

#define TCA_ATM_MAX	(__TCA_ATM_MAX - 1)
/* Network emulator */

enum
{
	TCA_NETEM_UNSPEC,
	TCA_NETEM_CORR,
	TCA_NETEM_DELAY_DIST,
	TCA_NETEM_REORDER,
	TCA_NETEM_CORRUPT,
	__TCA_NETEM_MAX,
};

#define TCA_NETEM_MAX (__TCA_NETEM_MAX - 1)

struct tc_netem_qopt
{
	__u32	latency;	/* added delay (us) */
	__u32	limit;		/* fifo limit (packets) */
	__u32	loss;		/* random packet loss (0=none ~0=100%) */
	__u32	gap;		/* re-ordering gap (0 for none) */
	__u32	duplicate;	/* random packet dup (0=none ~0=100%) */
	__u32	jitter;		/* random jitter in latency (us) */
};
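
/*
 * Illustrative sketch, not part of the ABI: per the field comments above,
 * 'loss' and 'duplicate' are probabilities on a fixed-point scale where 0
 * means never and ~0 (all bits set) means always. The helper name is
 * hypothetical; it only shows how a percentage maps onto that scale.
 */
static inline __u32 netem_percent_example(unsigned int percent)
{
	/* percent = 100 maps to 0xFFFFFFFF, percent = 1 to roughly 0xFFFFFFFF / 100 */
	return (__u32)((0xFFFFFFFFULL * percent) / 100);
}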
struct tc_netem_corr
{
	__u32	delay_corr;	/* delay correlation */
	__u32	loss_corr;	/* packet loss correlation */
	__u32	dup_corr;	/* duplicate correlation */
};

struct tc_netem_reorder
{
	__u32	probability;
	__u32	correlation;
};

struct tc_netem_corrupt
{
	__u32	probability;
	__u32	correlation;
};

#define NETEM_DIST_SCALE	8192

#endif