1 /* $NetBSD: altq_rio.c,v 1.20 2009/03/18 17:06:41 cegger Exp $ */
2 /* $KAME: altq_rio.c,v 1.19 2005/04/13 03:44:25 suz Exp $ */
5 * Copyright (C) 1998-2003
6 * Sony Computer Science Laboratories Inc. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Copyright (c) 1990-1994 Regents of the University of California.
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by the Computer Systems
44 * Engineering Group at Lawrence Berkeley Laboratory.
45 * 4. Neither the name of the University nor of the Laboratory may be used
46 * to endorse or promote products derived from this software without
47 * specific prior written permission.
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 #include <sys/cdefs.h>
63 __KERNEL_RCSID(0, "$NetBSD: altq_rio.c,v 1.20 2009/03/18 17:06:41 cegger Exp $");
71 #ifdef ALTQ_RIO /* rio is enabled by ALTQ_RIO option in opt_altq.h */
73 #include <sys/param.h>
74 #include <sys/malloc.h>
76 #include <sys/socket.h>
77 #include <sys/systm.h>
78 #include <sys/errno.h>
79 #include <sys/kauth.h>
80 #if 1 /* ALTQ3_COMPAT */
82 #include <sys/sockio.h>
83 #include <sys/kernel.h>
88 #include <netinet/in.h>
89 #include <netinet/in_systm.h>
90 #include <netinet/ip.h>
92 #include <netinet/ip6.h>
96 #include <net/pfvar.h>
98 #include <altq/altq.h>
99 #include <altq/altq_cdnr.h>
100 #include <altq/altq_red.h>
101 #include <altq/altq_rio.h>
103 #include <altq/altq_conf.h>
107 * RIO: RED with IN/OUT bit
109 * "Explicit Allocation of Best Effort Packet Delivery Service"
110 * David D. Clark and Wenjia Fang, MIT Lab for Computer Science
111 * http://diffserv.lcs.mit.edu/Papers/exp-alloc-ddc-wf.{ps,pdf}
113 * this implementation is extended to support more than 2 drop precedence
114 * values as described in RFC2597 (Assured Forwarding PHB Group).
118 * AF DS (differentiated service) codepoints.
119 * (classes can be mapped to CBQ or H-FSC classes.)
122 * +---+---+---+---+---+---+---+---+
123 * | CLASS |DropPre| 0 | CU |
124 * +---+---+---+---+---+---+---+---+
132 * medium drop prec: 10
/*
 * NOTE(review): this chunk is a damaged extraction -- original file line
 * numbers are fused into the text and several lines are missing (e.g. the
 * TV_DELTA macro body below is incomplete and default_rio_params lacks its
 * closing brace in this view). Code is preserved byte-for-byte; only
 * comments are added/fixed.
 */
/* normal red parameters */
137 #define W_WEIGHT 512 /* inverse of weight of EWMA (511/512) */
138 /* q_weight = 0.00195 */
140 /* red parameters for a slow link */
141 #define W_WEIGHT_1 128 /* inverse of weight of EWMA (127/128) */
142 /* q_weight = 0.0078125 */
144 /* red parameters for a very slow link (e.g., dialup) */
145 #define W_WEIGHT_2 64 /* inverse of weight of EWMA (63/64) */
146 /* q_weight = 0.015625 */
148 /* fixed-point uses 12-bit decimal places */
149 #define FP_SHIFT 12 /* fixed-point shift */
151 /* red parameters for drop probability */
152 #define INV_P_MAX 10 /* inverse of max drop probability */
153 #define TH_MIN 5 /* min threshold */
154 #define TH_MAX 15 /* max threshold */
156 #define RIO_LIMIT 60 /* default max queue length */
157 #define RIO_STATS /* collect statistics */
/* compute (a - b) in microseconds into delta; body truncated in this view */
159 #define TV_DELTA(a, b, delta) { \
162 delta = (a)->tv_usec - (b)->tv_usec; \
163 if ((xxs = (a)->tv_sec - (b)->tv_sec) != 0) { \
166 } else if (xxs > 4) { \
170 delta += xxs * 1000000; \
171 } else while (xxs > 0) { \
179 /* rio_list keeps all rio_queue_t's allocated. */
180 static rio_queue_t
*rio_list
= NULL
;
182 /* default rio parameter values */
/*
 * One {th_min, th_max, inv_pmax} triple per drop precedence; lower
 * precedence gets higher thresholds (drops later) per RFC 2597 AF.
 */
183 static struct redparams default_rio_params
[RIO_NDROPPREC
] = {
184 /* th_min, th_max, inv_pmax */
185 { TH_MAX
* 2 + TH_MIN
, TH_MAX
* 3, INV_P_MAX
}, /* low drop precedence */
186 { TH_MAX
+ TH_MIN
, TH_MAX
* 2, INV_P_MAX
}, /* medium drop precedence */
187 { TH_MIN
, TH_MAX
, INV_P_MAX
} /* high drop precedence */
190 /* internal function prototypes */
191 static int dscp2index(u_int8_t
);
193 static int rio_enqueue(struct ifaltq
*, struct mbuf
*, struct altq_pktattr
*);
194 static struct mbuf
*rio_dequeue(struct ifaltq
*, int);
195 static int rio_request(struct ifaltq
*, int, void *);
196 static int rio_detach(rio_queue_t
*);
199 * rio device interface
203 #endif /* ALTQ3_COMPAT */
/*
 * rio_alloc: allocate and initialize a rio_t state block.
 * Visible behavior: zero-allocates the structure, stores flags/pkttime/
 * weight (falling back to defaults when arguments are 0 -- the guarding
 * if-lines are missing from this extraction), lowers the EWMA weight for
 * slow links, derives rio_wshift (weight must be a power of 2), allocates
 * the pow_w() weight table, and fills per-precedence thresholds from
 * `params` or default_rio_params, pre-scaling th_min_s/th_max_s and the
 * drop-probability denominator `probd` into fixed point.
 * NOTE(review): return type, braces, local declarations and several
 * conditional lines are missing from this extraction; code kept verbatim.
 */
206 rio_alloc(int weight
, struct redparams
*params
, int flags
, int pkttime
)
212 rp
= malloc(sizeof(rio_t
), M_DEVBUF
, M_WAITOK
|M_ZERO
);
216 rp
->rio_flags
= flags
;
218 /* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
219 rp
->rio_pkttime
= 800;
221 rp
->rio_pkttime
= pkttime
;
224 rp
->rio_weight
= weight
;
227 rp
->rio_weight
= W_WEIGHT
;
229 /* when the link is very slow, adjust red parameters */
230 npkts_per_sec
= 1000000 / rp
->rio_pkttime
;
231 if (npkts_per_sec
< 50) {
232 /* up to about 400Kbps */
233 rp
->rio_weight
= W_WEIGHT_2
;
234 } else if (npkts_per_sec
< 300) {
235 /* up to about 2.4Mbps */
236 rp
->rio_weight
= W_WEIGHT_1
;
240 /* calculate wshift. weight must be power of 2 */
242 for (i
= 0; w
> 1; i
++)
245 w
= 1 << rp
->rio_wshift
;
246 if (w
!= rp
->rio_weight
) {
247 printf("invalid weight value %d for red! use %d\n",
252 /* allocate weight table */
253 rp
->rio_wtab
= wtab_alloc(rp
->rio_weight
);
/* per-precedence init: missing params (or zero fields) fall back to defaults */
255 for (i
= 0; i
< RIO_NDROPPREC
; i
++) {
256 struct dropprec_state
*prec
= &rp
->rio_precstate
[i
];
261 if (params
== NULL
|| params
[i
].inv_pmax
== 0)
262 prec
->inv_pmax
= default_rio_params
[i
].inv_pmax
;
264 prec
->inv_pmax
= params
[i
].inv_pmax
;
265 if (params
== NULL
|| params
[i
].th_min
== 0)
266 prec
->th_min
= default_rio_params
[i
].th_min
;
268 prec
->th_min
= params
[i
].th_min
;
269 if (params
== NULL
|| params
[i
].th_max
== 0)
270 prec
->th_max
= default_rio_params
[i
].th_max
;
272 prec
->th_max
= params
[i
].th_max
;
275 * th_min_s and th_max_s are scaled versions of th_min
276 * and th_max to be compared with avg.
278 prec
->th_min_s
= prec
->th_min
<< (rp
->rio_wshift
+ FP_SHIFT
);
279 prec
->th_max_s
= prec
->th_max
<< (rp
->rio_wshift
+ FP_SHIFT
);
282 * precompute probability denominator
283 * probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
285 prec
->probd
= (2 * (prec
->th_max
- prec
->th_min
)
286 * prec
->inv_pmax
) << FP_SHIFT
;
288 microtime(&prec
->last
);
/*
 * rio_destroy: tear down a rio_t created by rio_alloc.
 * Frees the pow_w weight table; the line freeing `rp` itself is missing
 * from this extraction (presumably free(rp, M_DEVBUF) -- TODO confirm
 * against the upstream file).
 */
295 rio_destroy(rio_t
*rp
)
297 wtab_destroy(rp
->rio_wtab
);
/*
 * rio_getstats: copy per-precedence redstats out to `sp`, overwriting
 * q_avg with the current EWMA average descaled by rio_wshift.
 * NOTE(review): loop body end and the `sp` advance (if any) are missing
 * from this extraction; code kept verbatim.
 */
302 rio_getstats(rio_t
*rp
, struct redstats
*sp
)
306 for (i
= 0; i
< RIO_NDROPPREC
; i
++) {
307 memcpy(sp
, &rp
->q_stats
[i
], sizeof(struct redstats
));
308 sp
->q_avg
= rp
->rio_precstate
[i
].avg
>> rp
->rio_wshift
;
313 #if (RIO_NDROPPREC == 3)
315 * internally, a drop precedence value is converted to an index
/*
 * dscp2index: map an AF DSCP's drop-precedence bits to a 0-based array
 * index (visible math: (dpindex >> 3) - 1). Lines handling a zero
 * precedence value appear to be missing from this extraction.
 */
319 dscp2index(u_int8_t dscp
)
321 int dpindex
= dscp
& AF_DROPPRECMASK
;
325 return ((dpindex
>> 3) - 1);
331 * kludge: when a packet is dequeued, we need to know its drop precedence
332 * in order to keep the queue length of each drop precedence.
333 * use m_pkthdr.rcvif to pass this info.
/* stash the precedence index in the (otherwise unused here) rcvif pointer */
335 #define RIOM_SET_PRECINDEX(m, idx) \
336 do { (m)->m_pkthdr.rcvif = (struct ifnet *)((long)(idx)); } while (0)
/* retrieve the index and clear rcvif (GCC statement-expression) */
337 #define RIOM_GET_PRECINDEX(m) \
338 ({ long idx; idx = (long)((m)->m_pkthdr.rcvif); \
339 (m)->m_pkthdr.rcvif = NULL; idx; })
/*
 * rio_addq: RIO enqueue decision for one mbuf.
 * Visible flow: read the packet's DS field, map to a drop-precedence
 * index, update the EWMA `avg` of every precedence >= the packet's
 * (decaying by (1-Wq)^n over the idle gap measured against prec->last),
 * then run the RED drop test against the packet's own precedence state:
 * forced drop above th_max_s, probabilistic (drop_early) between
 * th_min_s and th_max_s, forced drop at the hard queue limit. On drop,
 * counters reset and drop stats are bumped; on accept, per-precedence
 * qlen is bumped, the index is stashed via RIOM_SET_PRECINDEX, and the
 * DS field is rewritten if RIOF_CLEARDSCP changed it.
 * NOTE(review): many lines are missing in this extraction (locals such
 * as avg/droptype/now, `if` guards, braces, the _addq call, returns);
 * code kept byte-for-byte.
 */
343 rio_addq(rio_t
*rp
, class_queue_t
*q
, struct mbuf
*m
,
344 struct altq_pktattr
*pktattr
)
347 u_int8_t dsfield
, odsfield
;
348 int dpindex
, i
, n
, t
;
350 struct dropprec_state
*prec
;
352 dsfield
= odsfield
= read_dsfield(m
, pktattr
);
353 dpindex
= dscp2index(dsfield
);
356 * update avg of the precedence states whose drop precedence
357 * is larger than or equal to the drop precedence of the packet
360 for (i
= dpindex
; i
< RIO_NDROPPREC
; i
++) {
361 prec
= &rp
->rio_precstate
[i
];
/* idle-time decay: t = elapsed usec since this precedence went idle */
367 t
= (now
.tv_sec
- prec
->last
.tv_sec
);
372 (now
.tv_usec
- prec
->last
.tv_usec
);
373 n
= t
/ rp
->rio_pkttime
;
374 /* calculate (avg = (1 - Wq)^n * avg) */
376 avg
= (avg
>> FP_SHIFT
) *
377 pow_w(rp
->rio_wtab
, n
);
381 /* run estimator. (avg is scaled by WEIGHT in fixed-point) */
382 avg
+= (prec
->qlen
<< FP_SHIFT
) - (avg
>> rp
->rio_wshift
);
383 prec
->avg
= avg
; /* save the new value */
385 * count keeps a tally of arriving traffic that has not
391 prec
= &rp
->rio_precstate
[dpindex
];
394 /* see if we drop early */
395 droptype
= DTYPE_NODROP
;
396 if (avg
>= prec
->th_min_s
&& prec
->qlen
> 1) {
397 if (avg
>= prec
->th_max_s
) {
398 /* avg >= th_max: forced drop */
399 droptype
= DTYPE_FORCED
;
400 } else if (prec
->old
== 0) {
401 /* first exceeds th_min */
404 } else if (drop_early((avg
- prec
->th_min_s
) >> rp
->rio_wshift
,
405 prec
->probd
, prec
->count
)) {
406 /* unforced drop by red */
407 droptype
= DTYPE_EARLY
;
415 * if the queue length hits the hard limit, it's a forced drop.
417 if (droptype
== DTYPE_NODROP
&& qlen(q
) >= qlimit(q
))
418 droptype
= DTYPE_FORCED
;
420 if (droptype
!= DTYPE_NODROP
) {
421 /* always drop incoming packet (as opposed to randomdrop) */
422 for (i
= dpindex
; i
< RIO_NDROPPREC
; i
++)
423 rp
->rio_precstate
[i
].count
= 0;
425 if (droptype
== DTYPE_EARLY
)
426 rp
->q_stats
[dpindex
].drop_unforced
++;
428 rp
->q_stats
[dpindex
].drop_forced
++;
429 PKTCNTR_ADD(&rp
->q_stats
[dpindex
].drop_cnt
, m_pktlen(m
));
/* accepted: this packet counts toward its own and all higher precedences */
435 for (i
= dpindex
; i
< RIO_NDROPPREC
; i
++)
436 rp
->rio_precstate
[i
].qlen
++;
438 /* save drop precedence index in mbuf hdr */
439 RIOM_SET_PRECINDEX(m
, dpindex
);
441 if (rp
->rio_flags
& RIOF_CLEARDSCP
)
442 dsfield
&= ~DSCP_MASK
;
444 if (dsfield
!= odsfield
)
445 write_dsfield(m
, pktattr
, dsfield
);
450 PKTCNTR_ADD(&rp
->q_stats
[dpindex
].xmit_cnt
, m_pktlen(m
));
/*
 * rio_getq: dequeue the head mbuf, recover its drop-precedence index
 * (stored at enqueue time via RIOM_GET_PRECINDEX), and decrement qlen
 * for that precedence and all higher ones; a precedence whose qlen hits
 * zero is marked idle and timestamped so rio_addq can decay its avg.
 * NOTE(review): return statements and closing braces are missing from
 * this extraction; code kept verbatim.
 */
456 rio_getq(rio_t
*rp
, class_queue_t
*q
)
461 if ((m
= _getq(q
)) == NULL
)
464 dpindex
= RIOM_GET_PRECINDEX(m
);
465 for (i
= dpindex
; i
< RIO_NDROPPREC
; i
++) {
466 if (--rp
->rio_precstate
[i
].qlen
== 0) {
467 if (rp
->rio_precstate
[i
].idle
== 0) {
468 rp
->rio_precstate
[i
].idle
= 1;
469 microtime(&rp
->rio_precstate
[i
].last
);
/*
 * rioopen: ALTQ3-compat character-device open; intentionally a no-op
 * (setup happens at attach time). Trailing parameters and the return
 * are missing from this extraction.
 */
478 rioopen(dev_t dev
, int flag
, int fmt
,
481 /* everything will be done when the queueing scheme is attached. */
/*
 * rioclose: ALTQ3-compat device close; detaches every queue on the
 * global rio_list, remembering the first nonzero error. Declarations,
 * braces and the return are missing from this extraction.
 */
486 rioclose(dev_t dev
, int flag
, int fmt
,
492 while ((rqp
= rio_list
) != NULL
) {
494 err
= rio_detach(rqp
);
495 if (err
!= 0 && error
== 0)
/*
 * rioioctl: ALTQ3-compat ioctl dispatcher for the rio device.
 * Visible operations (the switch skeleton and case labels are missing
 * from this extraction, so grouping is inferred from the surviving
 * bodies -- TODO confirm against upstream):
 *  - privilege check via kauth_authorize_network (suser on old FreeBSD);
 *  - enable/disable: look up the interface's rio queue and call
 *    altq_enable/altq_disable;
 *  - attach: allocate rio_queue_t + class queue + rio_t, wire them to
 *    the ifnet's if_snd via altq_attach, and link onto rio_list
 *    (error paths free what was allocated);
 *  - detach: rio_detach on the looked-up queue;
 *  - getstats: copy limit/weight/flags and per-precedence qlen, stats
 *    (with descaled q_avg) and parameters into a struct rio_stats;
 *  - config: build a replacement rio_t from struct rio_conf, clamp the
 *    queue limit to at least the largest th_max, destroy the old rio_t
 *    and write the effective values back to the caller;
 *  - RIO_SETDEFAULTS: overwrite default_rio_params from user input.
 * Code below kept byte-for-byte.
 */
503 rioioctl(dev_t dev
, ioctlcmd_t cmd
, void *addr
, int flag
,
507 struct rio_interface
*ifacep
;
511 /* check super-user privilege */
516 #if (__FreeBSD_version > 400000)
517 if ((error
= suser(p
)) != 0)
520 if ((error
= kauth_authorize_network(l
->l_cred
,
521 KAUTH_NETWORK_ALTQ
, KAUTH_REQ_NETWORK_ALTQ_RIO
, NULL
,
531 ifacep
= (struct rio_interface
*)addr
;
532 if ((rqp
= altq_lookup(ifacep
->rio_ifname
, ALTQT_RIO
)) == NULL
) {
536 error
= altq_enable(rqp
->rq_ifq
);
540 ifacep
= (struct rio_interface
*)addr
;
541 if ((rqp
= altq_lookup(ifacep
->rio_ifname
, ALTQT_RIO
)) == NULL
) {
545 error
= altq_disable(rqp
->rq_ifq
);
549 ifp
= ifunit(((struct rio_interface
*)addr
)->rio_ifname
);
555 /* allocate and initialize rio_queue_t */
556 rqp
= malloc(sizeof(rio_queue_t
), M_DEVBUF
, M_WAITOK
|M_ZERO
);
562 rqp
->rq_q
= malloc(sizeof(class_queue_t
), M_DEVBUF
,
564 if (rqp
->rq_q
== NULL
) {
570 rqp
->rq_rio
= rio_alloc(0, NULL
, 0, 0);
571 if (rqp
->rq_rio
== NULL
) {
572 free(rqp
->rq_q
, M_DEVBUF
);
578 rqp
->rq_ifq
= &ifp
->if_snd
;
579 qtail(rqp
->rq_q
) = NULL
;
581 qlimit(rqp
->rq_q
) = RIO_LIMIT
;
582 qtype(rqp
->rq_q
) = Q_RIO
;
585 * set RIO to this ifnet structure.
587 error
= altq_attach(rqp
->rq_ifq
, ALTQT_RIO
, rqp
,
588 rio_enqueue
, rio_dequeue
, rio_request
,
591 rio_destroy(rqp
->rq_rio
);
592 free(rqp
->rq_q
, M_DEVBUF
);
597 /* add this state to the rio list */
598 rqp
->rq_next
= rio_list
;
603 ifacep
= (struct rio_interface
*)addr
;
604 if ((rqp
= altq_lookup(ifacep
->rio_ifname
, ALTQT_RIO
)) == NULL
) {
608 error
= rio_detach(rqp
);
613 struct rio_stats
*q_stats
;
617 q_stats
= (struct rio_stats
*)addr
;
618 if ((rqp
= altq_lookup(q_stats
->iface
.rio_ifname
,
619 ALTQT_RIO
)) == NULL
) {
626 q_stats
->q_limit
= qlimit(rqp
->rq_q
);
627 q_stats
->weight
= rp
->rio_weight
;
628 q_stats
->flags
= rp
->rio_flags
;
630 for (i
= 0; i
< RIO_NDROPPREC
; i
++) {
631 q_stats
->q_len
[i
] = rp
->rio_precstate
[i
].qlen
;
632 memcpy(&q_stats
->q_stats
[i
], &rp
->q_stats
[i
],
633 sizeof(struct redstats
));
634 q_stats
->q_stats
[i
].q_avg
=
635 rp
->rio_precstate
[i
].avg
>> rp
->rio_wshift
;
637 q_stats
->q_params
[i
].inv_pmax
638 = rp
->rio_precstate
[i
].inv_pmax
;
639 q_stats
->q_params
[i
].th_min
640 = rp
->rio_precstate
[i
].th_min
;
641 q_stats
->q_params
[i
].th_max
642 = rp
->rio_precstate
[i
].th_max
;
644 } while (/*CONSTCOND*/ 0);
653 fc
= (struct rio_conf
*)addr
;
654 if ((rqp
= altq_lookup(fc
->iface
.rio_ifname
,
655 ALTQT_RIO
)) == NULL
) {
660 new = rio_alloc(fc
->rio_weight
, &fc
->q_params
[0],
661 fc
->rio_flags
, fc
->rio_pkttime
);
669 limit
= fc
->rio_limit
;
670 if (limit
< fc
->q_params
[RIO_NDROPPREC
-1].th_max
)
671 limit
= fc
->q_params
[RIO_NDROPPREC
-1].th_max
;
672 qlimit(rqp
->rq_q
) = limit
;
674 rio_destroy(rqp
->rq_rio
);
679 /* write back new values */
680 fc
->rio_limit
= limit
;
681 for (i
= 0; i
< RIO_NDROPPREC
; i
++) {
682 fc
->q_params
[i
].inv_pmax
=
683 rqp
->rq_rio
->rio_precstate
[i
].inv_pmax
;
684 fc
->q_params
[i
].th_min
=
685 rqp
->rq_rio
->rio_precstate
[i
].th_min
;
686 fc
->q_params
[i
].th_max
=
687 rqp
->rq_rio
->rio_precstate
[i
].th_max
;
689 } while (/*CONSTCOND*/ 0);
692 case RIO_SETDEFAULTS
:
694 struct redparams
*rp
;
697 rp
= (struct redparams
*)addr
;
698 for (i
= 0; i
< RIO_NDROPPREC
; i
++)
699 default_rio_params
[i
] = rp
[i
];
700 } while (/*CONSTCOND*/ 0);
/*
 * rio_detach: disable and detach a rio queue from its ifaltq, unlink it
 * from the global rio_list (head fast path, then linear search with a
 * warning if absent), and free its rio_t and class queue. The line
 * freeing `rqp` itself and the return are missing from this extraction.
 */
712 rio_detach(rio_queue_t
*rqp
)
717 if (ALTQ_IS_ENABLED(rqp
->rq_ifq
))
718 altq_disable(rqp
->rq_ifq
);
720 if ((error
= altq_detach(rqp
->rq_ifq
)))
724 rio_list
= rqp
->rq_next
;
726 for (tmp
= rio_list
; tmp
!= NULL
; tmp
= tmp
->rq_next
)
727 if (tmp
->rq_next
== rqp
) {
728 tmp
->rq_next
= rqp
->rq_next
;
732 printf("rio_detach: no state found in rio_list!\n");
735 rio_destroy(rqp
->rq_rio
);
736 free(rqp
->rq_q
, M_DEVBUF
);
742 * rio support routines
/*
 * rio_request: ALTQ request callback. Only the ALTQ_IS_ENABLED guard
 * survives in this extraction; the request handling (presumably
 * ALTRQ_PURGE flushing the queue -- TODO confirm upstream) is missing.
 */
745 rio_request(struct ifaltq
*ifq
, int req
, void *arg
)
747 rio_queue_t
*rqp
= (rio_queue_t
*)ifq
->altq_disc
;
752 if (ALTQ_IS_ENABLED(ifq
))
762 * returns: 0 when successfully queued.
763 * ENOBUFS when drop occurs.
/*
 * rio_enqueue: ALTQ enqueue callback; delegates the drop decision to
 * rio_addq. The success/failure return lines are missing from this
 * extraction.
 */
766 rio_enqueue(struct ifaltq
*ifq
, struct mbuf
*m
, struct altq_pktattr
*pktattr
)
768 rio_queue_t
*rqp
= (rio_queue_t
*)ifq
->altq_disc
;
771 if (rio_addq(rqp
->rq_rio
, rqp
->rq_q
, m
, pktattr
) == 0)
780 * must be called in splnet.
782 * returns: mbuf dequeued.
783 * NULL when no packet is available in the queue.
/*
 * rio_dequeue: ALTQ dequeue callback. ALTDQ_POLL peeks at the head
 * without removing; otherwise the packet is taken via rio_getq (which
 * also updates per-precedence queue lengths). The final return is
 * missing from this extraction.
 */
787 rio_dequeue(struct ifaltq
*ifq
, int op
)
789 rio_queue_t
*rqp
= (rio_queue_t
*)ifq
->altq_disc
;
790 struct mbuf
*m
= NULL
;
792 if (op
== ALTDQ_POLL
)
793 return qhead(rqp
->rq_q
);
795 m
= rio_getq(rqp
->rq_rio
, rqp
->rq_q
);
/* character-device switch entry and KLD module registration for rio */
803 static struct altqsw rio_sw
=
804 {"rio", rioopen
, rioclose
, rioioctl
};
806 ALTQ_MODULE(altq_rio
, ALTQT_RIO
, &rio_sw
);
807 MODULE_VERSION(altq_rio
, 1);
/* rio reuses red's wtab/drop_early helpers, hence the dependency */
808 MODULE_DEPEND(altq_rio
, altq_red
, 1, 1, 1);
810 #endif /* KLD_MODULE */
811 #endif /* ALTQ3_COMPAT */
813 #endif /* ALTQ_RIO */