/*	$NetBSD: altq_hfsc.c,v 1.23 2007/03/04 05:59:01 christos Exp $	*/
/*	$KAME: altq_hfsc.c,v 1.26 2005/04/13 03:44:24 suz Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
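/*
 * Illustrative example (an editor's sketch, not from the original
 * sources): a service curve is specified by two slopes and a crossover
 * interval, (m1, d, m2).  A real-time curve of m1=2Mbps, d=10ms,
 * m2=1Mbps guarantees 2Mbps for the first 10ms of a backlogged period
 * and 1Mbps thereafter; an upperlimit curve uses the same (m1, d, m2)
 * form to cap, rather than guarantee, the service a class receives.
 */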
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.23 2007/03/04 05:59:01 christos Exp $");

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/sockio.h>
#include <sys/kernel.h>
#endif /* ALTQ3_COMPAT */
#include <sys/kauth.h>

#include <netinet/in.h>

#include <net/pfvar.h>
#include <altq/altq.h>
#include <altq/altq_hfsc.h>
#include <altq/altq_conf.h>
static int		 hfsc_clear_interface(struct hfsc_if *);
static int		 hfsc_request(struct ifaltq *, int, void *);
static void		 hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int		 hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int		 hfsc_enqueue(struct ifaltq *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf	*hfsc_dequeue(struct ifaltq *, int);

static int		 hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf	*hfsc_getq(struct hfsc_class *);
static struct mbuf	*hfsc_pollq(struct hfsc_class *);
static void		 hfsc_purgeq(struct hfsc_class *);

static void		 update_cfmin(struct hfsc_class *);
static void		 set_active(struct hfsc_class *, int);
static void		 set_passive(struct hfsc_class *);

static void		 init_ed(struct hfsc_class *, int);
static void		 update_ed(struct hfsc_class *, int);
static void		 update_d(struct hfsc_class *, int);
static void		 init_vf(struct hfsc_class *, int);
static void		 update_vf(struct hfsc_class *, int, u_int64_t);
static ellist_t		*ellist_alloc(void);
static void		 ellist_destroy(ellist_t *);
static void		 ellist_insert(struct hfsc_class *);
static void		 ellist_remove(struct hfsc_class *);
static void		 ellist_update(struct hfsc_class *);
struct hfsc_class	*ellist_get_mindl(ellist_t *, u_int64_t);
static actlist_t	*actlist_alloc(void);
static void		 actlist_destroy(actlist_t *);
static void		 actlist_insert(struct hfsc_class *);
static void		 actlist_remove(struct hfsc_class *);
static void		 actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *,
    u_int64_t);

static inline u_int64_t	seg_x2y(u_int64_t, u_int64_t);
static inline u_int64_t	seg_y2x(u_int64_t, u_int64_t);
static inline u_int64_t	m2sm(u_int);
static inline u_int64_t	m2ism(u_int);
static inline u_int64_t	d2dx(u_int);
static u_int		sm2m(u_int64_t);
static u_int		dx2d(u_int64_t);

static void		sc2isc(struct service_curve *, struct internal_sc *);
static void		rtsc_init(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);
static u_int64_t	rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t	rtsc_x2y(struct runtime_sc *, u_int64_t);
static void		rtsc_min(struct runtime_sc *, struct internal_sc *,
    u_int64_t, u_int64_t);

static void		get_class_stats(struct hfsc_classstats *,
    struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, u_int32_t);

#ifdef ALTQ3_COMPAT
static struct hfsc_if	*hfsc_attach(struct ifaltq *, u_int);
static int		 hfsc_detach(struct hfsc_if *);
static int		 hfsc_class_modify(struct hfsc_class *,
    struct service_curve *, struct service_curve *, struct service_curve *);

static int		 hfsccmd_if_attach(struct hfsc_attach *);
static int		 hfsccmd_if_detach(struct hfsc_interface *);
static int		 hfsccmd_add_class(struct hfsc_add_class *);
static int		 hfsccmd_delete_class(struct hfsc_delete_class *);
static int		 hfsccmd_modify_class(struct hfsc_modify_class *);
static int		 hfsccmd_add_filter(struct hfsc_add_filter *);
static int		 hfsccmd_delete_filter(struct hfsc_delete_filter *);
static int		 hfsccmd_class_stats(struct hfsc_class_stats *);
#endif /* ALTQ3_COMPAT */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define	HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

#ifdef ALTQ3_COMPAT
/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
#endif /* ALTQ3_COMPAT */
int
hfsc_pfattach(struct pf_altq *a)
{
    struct ifnet *ifp;
    int s, error;

    if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
        return (EINVAL);
    s = splnet();
    error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
        hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
    splx(s);
    return (error);
}

int
hfsc_add_altq(struct pf_altq *a)
{
    struct hfsc_if *hif;
    struct ifnet *ifp;

    if ((ifp = ifunit(a->ifname)) == NULL)
        return (EINVAL);
    if (!ALTQ_IS_READY(&ifp->if_snd))
        return (ENODEV);

    hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
    if (hif == NULL)
        return (ENOMEM);

    hif->hif_eligible = ellist_alloc();
    if (hif->hif_eligible == NULL) {
        free(hif, M_DEVBUF);
        return (ENOMEM);
    }

    hif->hif_ifq = &ifp->if_snd;

    /* keep the state in pf_altq */
    a->altq_disc = hif;

    return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
    struct hfsc_if *hif;

    if ((hif = a->altq_disc) == NULL)
        return (EINVAL);
    a->altq_disc = NULL;

    (void)hfsc_clear_interface(hif);
    (void)hfsc_class_destroy(hif->hif_rootclass);

    ellist_destroy(hif->hif_eligible);

    free(hif, M_DEVBUF);

    return (0);
}
int
hfsc_add_queue(struct pf_altq *a)
{
    struct hfsc_if *hif;
    struct hfsc_class *cl, *parent;
    struct hfsc_opts *opts;
    struct service_curve rtsc, lssc, ulsc;

    if ((hif = a->altq_disc) == NULL)
        return (EINVAL);

    opts = &a->pq_u.hfsc_opts;

    if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
        hif->hif_rootclass == NULL)
        parent = NULL;
    else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
        return (EINVAL);

    if (clh_to_clp(hif, a->qid) != NULL)
        return (EBUSY);

    rtsc.m1 = opts->rtsc_m1;
    rtsc.d  = opts->rtsc_d;
    rtsc.m2 = opts->rtsc_m2;
    lssc.m1 = opts->lssc_m1;
    lssc.d  = opts->lssc_d;
    lssc.m2 = opts->lssc_m2;
    ulsc.m1 = opts->ulsc_m1;
    ulsc.d  = opts->ulsc_d;
    ulsc.m2 = opts->ulsc_m2;

    cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
        parent, a->qlimit, opts->flags, a->qid);
    if (cl == NULL)
        return (ENOMEM);

    return (0);
}

int
hfsc_remove_queue(struct pf_altq *a)
{
    struct hfsc_if *hif;
    struct hfsc_class *cl;

    if ((hif = a->altq_disc) == NULL)
        return (EINVAL);

    if ((cl = clh_to_clp(hif, a->qid)) == NULL)
        return (EINVAL);

    return (hfsc_class_destroy(cl));
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
    struct hfsc_if *hif;
    struct hfsc_class *cl;
    struct hfsc_classstats stats;
    int error = 0;

    if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    if ((cl = clh_to_clp(hif, a->qid)) == NULL)
        return (EINVAL);

    if (*nbytes < sizeof(stats))
        return (EINVAL);

    get_class_stats(&stats, cl);

    if ((error = copyout((void *)&stats, ubuf, sizeof(stats))) != 0)
        return (error);
    *nbytes = sizeof(stats);
    return (0);
}
/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
    struct hfsc_class *cl;

#ifdef ALTQ3_COMPAT
    /* free the filters for this interface */
    acc_discard_filters(&hif->hif_classifier, NULL, 1);
#endif

    /* clear out the classes */
    while (hif->hif_rootclass != NULL &&
        (cl = hif->hif_rootclass->cl_children) != NULL) {
        /*
         * remove the first leaf class found in the hierarchy
         * then start over
         */
        for (; cl != NULL; cl = hfsc_nextclass(cl)) {
            if (!is_a_parent_class(cl)) {
                (void)hfsc_class_destroy(cl);
                break;
            }
        }
    }

    return (0);
}

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
    struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

    switch (req) {
    case ALTRQ_PURGE:
        hfsc_purge(hif);
        break;
    }
    return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
    struct hfsc_class *cl;

    for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
        if (!qempty(cl->cl_q))
            hfsc_purgeq(cl);
    if (ALTQ_IS_ENABLED(hif->hif_ifq))
        hif->hif_ifq->ifq_len = 0;
}
static struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
    struct hfsc_class *cl, *p;
    int i, s;

    if (hif->hif_classes >= HFSC_MAX_CLASSES)
        return (NULL);

#ifndef ALTQ_RED
    if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
        printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
        return (NULL);
    }
#endif

    cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_WAITOK|M_ZERO);
    if (cl == NULL)
        return (NULL);

    cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
    if (cl->cl_q == NULL)
        goto err_ret;

    cl->cl_actc = actlist_alloc();
    if (cl->cl_actc == NULL)
        goto err_ret;

    if (qlimit == 0)
        qlimit = 50;  /* use default */
    qlimit(cl->cl_q) = qlimit;
    qtype(cl->cl_q) = Q_DROPTAIL;
    cl->cl_flags = flags;
#ifdef ALTQ_RED
    if (flags & (HFCF_RED|HFCF_RIO)) {
        int red_flags, red_pkttime;
        u_int m2;

        m2 = 0;
        if (rsc != NULL && rsc->m2 > m2)
            m2 = rsc->m2;
        if (fsc != NULL && fsc->m2 > m2)
            m2 = fsc->m2;
        if (usc != NULL && usc->m2 > m2)
            m2 = usc->m2;

        red_flags = 0;
        if (flags & HFCF_ECN)
            red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
        if (flags & HFCF_CLEARDSCP)
            red_flags |= RIOF_CLEARDSCP;
#endif
        if (m2 < 8)
            red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
        else
            red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
                * 1000 * 1000 * 1000 / (m2 / 8);
        if (flags & HFCF_RED) {
            cl->cl_red = red_alloc(0, 0,
                qlimit(cl->cl_q) * 10/100,
                qlimit(cl->cl_q) * 30/100,
                red_flags, red_pkttime);
            if (cl->cl_red != NULL)
                qtype(cl->cl_q) = Q_RED;
        }
#ifdef ALTQ_RIO
        else {
            cl->cl_red = (red_t *)rio_alloc(0, NULL,
                red_flags, red_pkttime);
            if (cl->cl_red != NULL)
                qtype(cl->cl_q) = Q_RIO;
        }
#endif
    }
#endif /* ALTQ_RED */

    if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
        cl->cl_rsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
            M_WAITOK);
        if (cl->cl_rsc == NULL)
            goto err_ret;
        sc2isc(rsc, cl->cl_rsc);
        rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
        rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
    }
    if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
        cl->cl_fsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
            M_WAITOK);
        if (cl->cl_fsc == NULL)
            goto err_ret;
        sc2isc(fsc, cl->cl_fsc);
        rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
    }
    if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
        cl->cl_usc = malloc(sizeof(struct internal_sc), M_DEVBUF,
            M_WAITOK);
        if (cl->cl_usc == NULL)
            goto err_ret;
        sc2isc(usc, cl->cl_usc);
        rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
    }

    cl->cl_id = hif->hif_classid++;
    cl->cl_handle = qid;
    cl->cl_hif = hif;
    cl->cl_parent = parent;

    s = splnet();
    hif->hif_classes++;

    /*
     * find a free slot in the class table.  if the slot matching
     * the lower bits of qid is free, use this slot.  otherwise,
     * use the first free slot.
     */
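    /*
     * Illustrative example (not from the original sources): with a
     * table of HFSC_MAX_CLASSES slots, a qid of HFSC_MAX_CLASSES + 3
     * hashes to slot 3 below; only if slot 3 is already taken does the
     * fallback loop claim the first free slot instead.
     */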
    i = qid % HFSC_MAX_CLASSES;
    if (hif->hif_class_tbl[i] == NULL)
        hif->hif_class_tbl[i] = cl;
    else {
        for (i = 0; i < HFSC_MAX_CLASSES; i++)
            if (hif->hif_class_tbl[i] == NULL) {
                hif->hif_class_tbl[i] = cl;
                break;
            }
        if (i == HFSC_MAX_CLASSES) {
            splx(s);
            goto err_ret;
        }
    }

    if (flags & HFCF_DEFAULTCLASS)
        hif->hif_defaultclass = cl;

    if (parent == NULL) {
        /* this is root class */
        hif->hif_rootclass = cl;
    } else {
        /* add this class to the children list of the parent */
        if ((p = parent->cl_children) == NULL)
            parent->cl_children = cl;
        else {
            while (p->cl_siblings != NULL)
                p = p->cl_siblings;
            p->cl_siblings = cl;
        }
    }
    splx(s);

    return (cl);

 err_ret:
    if (cl->cl_actc != NULL)
        actlist_destroy(cl->cl_actc);
    if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
        if (q_is_rio(cl->cl_q))
            rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
        if (q_is_red(cl->cl_q))
            red_destroy(cl->cl_red);
#endif
    }
    if (cl->cl_fsc != NULL)
        free(cl->cl_fsc, M_DEVBUF);
    if (cl->cl_rsc != NULL)
        free(cl->cl_rsc, M_DEVBUF);
    if (cl->cl_usc != NULL)
        free(cl->cl_usc, M_DEVBUF);
    if (cl->cl_q != NULL)
        free(cl->cl_q, M_DEVBUF);
    free(cl, M_DEVBUF);
    return (NULL);
}
static int
hfsc_class_destroy(struct hfsc_class *cl)
{
    int i, s;

    if (cl == NULL)
        return (0);

    if (is_a_parent_class(cl))
        return (EBUSY);

    s = splnet();

#ifdef ALTQ3_COMPAT
    /* delete filters referencing to this class */
    acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
#endif /* ALTQ3_COMPAT */

    if (!qempty(cl->cl_q))
        hfsc_purgeq(cl);

    if (cl->cl_parent == NULL) {
        /* this is root class */
    } else {
        struct hfsc_class *p = cl->cl_parent->cl_children;

        if (p == cl)
            cl->cl_parent->cl_children = cl->cl_siblings;
        else do {
            if (p->cl_siblings == cl) {
                p->cl_siblings = cl->cl_siblings;
                break;
            }
        } while ((p = p->cl_siblings) != NULL);
    }

    for (i = 0; i < HFSC_MAX_CLASSES; i++)
        if (cl->cl_hif->hif_class_tbl[i] == cl) {
            cl->cl_hif->hif_class_tbl[i] = NULL;
            break;
        }

    cl->cl_hif->hif_classes--;
    splx(s);

    actlist_destroy(cl->cl_actc);

    if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
        if (q_is_rio(cl->cl_q))
            rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
        if (q_is_red(cl->cl_q))
            red_destroy(cl->cl_red);
#endif
    }

    if (cl == cl->cl_hif->hif_rootclass)
        cl->cl_hif->hif_rootclass = NULL;
    if (cl == cl->cl_hif->hif_defaultclass)
        cl->cl_hif->hif_defaultclass = NULL;

    if (cl->cl_usc != NULL)
        free(cl->cl_usc, M_DEVBUF);
    if (cl->cl_fsc != NULL)
        free(cl->cl_fsc, M_DEVBUF);
    if (cl->cl_rsc != NULL)
        free(cl->cl_rsc, M_DEVBUF);
    free(cl->cl_q, M_DEVBUF);
    free(cl, M_DEVBUF);

    return (0);
}
/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
    if (cl->cl_children != NULL)
        cl = cl->cl_children;
    else if (cl->cl_siblings != NULL)
        cl = cl->cl_siblings;
    else {
        while ((cl = cl->cl_parent) != NULL)
            if (cl->cl_siblings) {
                cl = cl->cl_siblings;
                break;
            }
    }

    return (cl);
}
/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
    struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
    struct hfsc_class *cl;
    struct m_tag *t;
    int len;

    /* grab class set by classifier */
    if ((m->m_flags & M_PKTHDR) == 0) {
        /* should not happen */
        printf("altq: packet for %s does not have pkthdr\n",
            ifq->altq_ifp->if_xname);
        m_freem(m);
        return (ENOBUFS);
    }
    cl = NULL;
    if ((t = m_tag_find(m, PACKET_TAG_ALTQ_QID, NULL)) != NULL)
        cl = clh_to_clp(hif, ((struct altq_tag *)(t+1))->qid);
#ifdef ALTQ3_COMPAT
    else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
        cl = pktattr->pattr_class;
#endif
    if (cl == NULL || is_a_parent_class(cl)) {
        cl = hif->hif_defaultclass;
        if (cl == NULL) {
            m_freem(m);
            return (ENOBUFS);
        }
    }
#ifdef ALTQ3_COMPAT
    if (pktattr != NULL)
        cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
    else
#endif
        cl->cl_pktattr = NULL;
    len = m_pktlen(m);
    if (hfsc_addq(cl, m) != 0) {
        /* drop occurred.  mbuf was freed in hfsc_addq. */
        PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
        return (ENOBUFS);
    }
    IFQ_INC_LEN(ifq);
    cl->cl_hif->hif_packets++;

    /* successfully queued. */
    if (qlen(cl->cl_q) == 1)
        set_active(cl, m_pktlen(m));

    return (0);
}
/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
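/*
 * Illustrative only (an editor's sketch, not from the original sources):
 * a driver-side transmit loop typically exercises the two operations
 * through the IFQ_POLL()/IFQ_DEQUEUE() macros, roughly:
 *
 *	IFQ_POLL(&ifp->if_snd, m);		(maps to ALTDQ_POLL)
 *	if (m == NULL || tx_ring_full(sc))
 *		return;				(leave the packet queued)
 *	IFQ_DEQUEUE(&ifp->if_snd, m);		(maps to ALTDQ_REMOVE and
 *						 must yield the polled mbuf)
 *
 * tx_ring_full() stands in for a hypothetical driver-specific check.
 */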
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
    struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
    struct hfsc_class *cl;
    struct mbuf *m;
    int len, next_len;
    int realtime = 0;
    u_int64_t cur_time;

    if (hif->hif_packets == 0)
        /* no packet in the tree */
        return (NULL);

    cur_time = read_machclk();

    if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {

        cl = hif->hif_pollcache;
        hif->hif_pollcache = NULL;
        /* check if the class was scheduled by real-time criteria */
        if (cl->cl_rsc != NULL)
            realtime = (cl->cl_e <= cur_time);
    } else {
        /*
         * if there are eligible classes, use real-time criteria.
         * find the class with the minimum deadline among
         * the eligible classes.
         */
        if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
            != NULL) {
            realtime = 1;
        } else {
#ifdef ALTQ_DEBUG
            int fits = 0;
#endif
            /*
             * use link-sharing criteria
             * get the class with the minimum vt in the hierarchy
             */
            cl = hif->hif_rootclass;
            while (is_a_parent_class(cl)) {

                cl = actlist_firstfit(cl, cur_time);
                if (cl == NULL) {
#ifdef ALTQ_DEBUG
                    if (fits > 0)
                        printf("%d fit but none found\n",fits);
#endif
                    return (NULL);
                }
                /*
                 * update parent's cl_cvtmin.
                 * don't update if the new vt is smaller.
                 */
                if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
                    cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
                fits++;
#endif
            }
        }

        if (op == ALTDQ_POLL) {
            hif->hif_pollcache = cl;
            m = hfsc_pollq(cl);
            return (m);
        }
    }

    m = hfsc_getq(cl);
    if (m == NULL)
        panic("hfsc_dequeue:");
    len = m_pktlen(m);
    cl->cl_hif->hif_packets--;
    IFQ_DEC_LEN(ifq);
    PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

    update_vf(cl, len, cur_time);
    if (realtime)
        cl->cl_cumul += len;

    if (!qempty(cl->cl_q)) {
        if (cl->cl_rsc != NULL) {
            next_len = m_pktlen(qhead(cl->cl_q));

            if (realtime)
                update_ed(cl, next_len);
            else
                update_d(cl, next_len);
        }
    } else {
        /* the class becomes passive */
        set_passive(cl);
    }

    return (m);
}
static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
    if (q_is_rio(cl->cl_q))
        return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
            m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
    if (q_is_red(cl->cl_q))
        return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
    if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
        m_freem(m);
        return (-1);
    }

    if (cl->cl_flags & HFCF_CLEARDSCP)
        write_dsfield(m, cl->cl_pktattr, 0);

    _addq(cl->cl_q, m);

    return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
    if (q_is_rio(cl->cl_q))
        return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
    if (q_is_red(cl->cl_q))
        return red_getq(cl->cl_red, cl->cl_q);
#endif
    return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
    return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
    struct mbuf *m;

    if (qempty(cl->cl_q))
        return;

    while ((m = _getq(cl->cl_q)) != NULL) {
        PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
        m_freem(m);
        cl->cl_hif->hif_packets--;
        IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
    }
    ASSERT(qlen(cl->cl_q) == 0);

    update_vf(cl, 0, 0);  /* remove cl from the actlist */
    set_passive(cl);
}
static void
set_active(struct hfsc_class *cl, int len)
{
    if (cl->cl_rsc != NULL)
        init_ed(cl, len);
    if (cl->cl_fsc != NULL)
        init_vf(cl, len);

    cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
    if (cl->cl_rsc != NULL)
        ellist_remove(cl);

    /*
     * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
     * needs to be called explicitly to remove a class from actlist
     */
}
static void
init_ed(struct hfsc_class *cl, int next_len)
{
    u_int64_t cur_time;

    cur_time = read_machclk();

    /* update the deadline curve */
    rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

    /*
     * update the eligible curve.
     * for concave, it is equal to the deadline curve.
     * for convex, it is a linear curve with slope m2.
     */
    cl->cl_eligible = cl->cl_deadline;
    if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
        cl->cl_eligible.dx = 0;
        cl->cl_eligible.dy = 0;
    }
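    /*
     * (editor's note, not in the original sources) zeroing dx and dy
     * collapses the two-segment eligible curve to its second segment
     * alone, i.e. a straight line of slope sm2.  a convex class
     * (sm1 <= sm2) thus becomes eligible as if it had always been
     * served at its long-term rate m2, so it can never accumulate
     * more credit than m2 allows.
     */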
    /* compute e and d */
    cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
    cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

    ellist_insert(cl);
}
static void
update_ed(struct hfsc_class *cl, int next_len)
{
    cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
    cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

    ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
    cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}
static void
init_vf(struct hfsc_class *cl, int len)
{
    struct hfsc_class *max_cl, *p;
    u_int64_t vt, f, cur_time;
    int go_active;

    cur_time = 0;
    go_active = 1;
    for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {

        if (go_active && cl->cl_nactive++ == 0)
            go_active = 1;
        else
            go_active = 0;

        if (go_active) {
            max_cl = actlist_last(cl->cl_parent->cl_actc);
            if (max_cl != NULL) {
                /*
                 * set vt to the average of the min and max
                 * classes.  if the parent's period didn't
                 * change, don't decrease vt of the class.
                 */
                vt = max_cl->cl_vt;
                if (cl->cl_parent->cl_cvtmin != 0)
                    vt = (cl->cl_parent->cl_cvtmin + vt)/2;

                if (cl->cl_parent->cl_vtperiod !=
                    cl->cl_parentperiod || vt > cl->cl_vt)
                    cl->cl_vt = vt;
            } else {
                /*
                 * first child for a new parent backlog period.
                 * add parent's cvtmax to vtoff of children
                 * to make a new vt (vtoff + vt) larger than
                 * the vt in the last period for all children.
                 */
                vt = cl->cl_parent->cl_cvtmax;
                for (p = cl->cl_parent->cl_children; p != NULL;
                     p = p->cl_siblings)
                    p->cl_vtoff += vt;
                cl->cl_vt = 0;
                cl->cl_parent->cl_cvtmax = 0;
                cl->cl_parent->cl_cvtmin = 0;
            }
            cl->cl_initvt = cl->cl_vt;

            /* update the virtual curve */
            vt = cl->cl_vt + cl->cl_vtoff;
            rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
            if (cl->cl_virtual.x == vt) {
                cl->cl_virtual.x -= cl->cl_vtoff;
                cl->cl_vtoff = 0;
            }

            cl->cl_vtperiod++;  /* increment vt period */
            cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
            if (cl->cl_parent->cl_nactive == 0)
                cl->cl_parentperiod++;
            cl->cl_f = 0;

            actlist_insert(cl);

            if (cl->cl_usc != NULL) {
                /* class has upper limit curve */
                if (cur_time == 0)
                    cur_time = read_machclk();

                /* update the ulimit curve */
                rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
                    cl->cl_total);
                cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
                    cl->cl_total);
                cl->cl_myfadj = 0;
            }
        }

        if (cl->cl_myf > cl->cl_cfmin)
            f = cl->cl_myf;
        else
            f = cl->cl_cfmin;
        if (f != cl->cl_f) {
            cl->cl_f = f;
            update_cfmin(cl->cl_parent);
        }
    }
}
static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
    u_int64_t f, myf_bound, delta;
    int go_passive;

    go_passive = qempty(cl->cl_q);

    for (; cl->cl_parent != NULL; cl = cl->cl_parent) {

        cl->cl_total += len;

        if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
            continue;

        if (go_passive && --cl->cl_nactive == 0)
            go_passive = 1;
        else
            go_passive = 0;

        if (go_passive) {
            /* no more active child, going passive */

            /* update cvtmax of the parent class */
            if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
                cl->cl_parent->cl_cvtmax = cl->cl_vt;

            /* remove this class from the vt list */
            actlist_remove(cl);

            update_cfmin(cl->cl_parent);

            continue;
        }

        /*
         * update vt and f
         */
        cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
            - cl->cl_vtoff + cl->cl_vtadj;

        /*
         * if vt of the class is smaller than cvtmin,
         * the class was skipped in the past due to non-fit.
         * if so, we need to adjust vtadj.
         */
        if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
            cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
            cl->cl_vt = cl->cl_parent->cl_cvtmin;
        }

        /* update the vt list */
        actlist_update(cl);

        if (cl->cl_usc != NULL) {
            cl->cl_myf = cl->cl_myfadj
                + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

            /*
             * if myf lags behind by more than one clock tick
             * from the current time, adjust myfadj to prevent
             * a rate-limited class from going greedy.
             * in a steady state under rate-limiting, myf
             * fluctuates within one clock tick.
             */
            myf_bound = cur_time - machclk_per_tick;
            if (cl->cl_myf < myf_bound) {
                delta = cur_time - cl->cl_myf;
                cl->cl_myfadj += delta;
                cl->cl_myf += delta;
            }
        }

        /* cl_f is max(cl_myf, cl_cfmin) */
        if (cl->cl_myf > cl->cl_cfmin)
            f = cl->cl_myf;
        else
            f = cl->cl_cfmin;
        if (f != cl->cl_f) {
            cl->cl_f = f;
            update_cfmin(cl->cl_parent);
        }
    }
}
static void
update_cfmin(struct hfsc_class *cl)
{
    struct hfsc_class *p;
    u_int64_t cfmin;

    if (TAILQ_EMPTY(cl->cl_actc)) {
        cl->cl_cfmin = 0;
        return;
    }
    cfmin = HT_INFINITY;
    TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
        if (p->cl_f == 0) {
            cl->cl_cfmin = 0;
            return;
        }
        if (p->cl_f < cfmin)
            cfmin = p->cl_f;
    }
    cl->cl_cfmin = cfmin;
}
/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */

static ellist_t *
ellist_alloc(void)
{
    ellist_t *head;

    head = malloc(sizeof(ellist_t), M_DEVBUF, M_WAITOK);
    TAILQ_INIT(head);
    return (head);
}

static void
ellist_destroy(ellist_t *head)
{
    free(head, M_DEVBUF);
}

static void
ellist_insert(struct hfsc_class *cl)
{
    struct hfsc_if *hif = cl->cl_hif;
    struct hfsc_class *p;

    /* check the last entry first */
    if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
        p->cl_e <= cl->cl_e) {
        TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
        return;
    }

    TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
        if (cl->cl_e < p->cl_e) {
            TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
            return;
        }
    }
    ASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
    struct hfsc_if *hif = cl->cl_hif;

    TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
    struct hfsc_if *hif = cl->cl_hif;
    struct hfsc_class *p, *last;

    /*
     * the eligible time of a class increases monotonically.
     * if the next entry has a larger eligible time, nothing to do.
     */
    p = TAILQ_NEXT(cl, cl_ellist);
    if (p == NULL || cl->cl_e <= p->cl_e)
        return;

    /* check the last entry */
    last = TAILQ_LAST(hif->hif_eligible, _eligible);
    ASSERT(last != NULL);
    if (last->cl_e <= cl->cl_e) {
        TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
        TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
        return;
    }

    /*
     * the new position must be between the next entry
     * and the last entry
     */
    while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
        if (cl->cl_e < p->cl_e) {
            TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
            TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
            return;
        }
    }
    ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
{
    struct hfsc_class *p, *cl = NULL;

    TAILQ_FOREACH(p, head, cl_ellist) {
        if (p->cl_e > cur_time)
            break;
        if (cl == NULL || p->cl_d < cl->cl_d)
            cl = p;
    }
    return (cl);
}
/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */

static actlist_t *
actlist_alloc(void)
{
    actlist_t *head;

    head = malloc(sizeof(actlist_t), M_DEVBUF, M_WAITOK);
    TAILQ_INIT(head);
    return (head);
}

static void
actlist_destroy(actlist_t *head)
{
    free(head, M_DEVBUF);
}

static void
actlist_insert(struct hfsc_class *cl)
{
    struct hfsc_class *p;

    /* check the last entry first */
    if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
        || p->cl_vt <= cl->cl_vt) {
        TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
        return;
    }

    TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
        if (cl->cl_vt < p->cl_vt) {
            TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
            return;
        }
    }
    ASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
    TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
    struct hfsc_class *p, *last;

    /*
     * the virtual time of a class increases monotonically during its
     * backlogged period.
     * if the next entry has a larger virtual time, nothing to do.
     */
    p = TAILQ_NEXT(cl, cl_actlist);
    if (p == NULL || cl->cl_vt < p->cl_vt)
        return;

    /* check the last entry */
    last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
    ASSERT(last != NULL);
    if (last->cl_vt <= cl->cl_vt) {
        TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
        TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
        return;
    }

    /*
     * the new position must be between the next entry
     * and the last entry
     */
    while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
        if (cl->cl_vt < p->cl_vt) {
            TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
            TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
            return;
        }
    }
    ASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
    struct hfsc_class *p;

    TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
        if (p->cl_f <= cur_time)
            return (p);
    }
    return (NULL);
}
/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec  12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000      8000       800        80         8
 *  ism(500MHz) 40000      4000       400        40         4
 *  ism(200MHz) 16000      1600       160        16         1.6
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SM_MASK		((1LL << SM_SHIFT) - 1)
#define	ISM_MASK	((1LL << ISM_SHIFT) - 1)
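/*
 * Worked example (an editor's addition, assuming SM_SHIFT == 24 as
 * defined above): for a 10Mbps slope on a 500MHz clock, m2sm() below
 * computes
 *
 *	sm = (10000000 << 24) / 8 / 500000000 ~= 41943
 *
 * which equals 2500e-6 bytes/tick scaled by 2^24, matching the
 * sm(500MHz) column of the table above.
 */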
static inline u_int64_t
seg_x2y(u_int64_t x, u_int64_t sm)
{
    u_int64_t y;

    /*
     * compute
     *	y = x * sm >> SM_SHIFT
     * but divide it for the upper and lower bits to avoid overflow
     */
    y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
    return (y);
}
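/*
 * (editor's note, not in the original sources) a single
 * (x * sm) >> SM_SHIFT would wrap once the product x * sm exceeds
 * 2^64, which can happen because sm carries SM_SHIFT fractional bits.
 * splitting x into its high bits (x >> SM_SHIFT) and low bits
 * (x & SM_MASK) keeps each intermediate product within 64 bits while
 * losing no precision.
 */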
static inline u_int64_t
seg_y2x(u_int64_t y, u_int64_t ism)
{
    u_int64_t x;

    if (y == 0)
        x = 0;
    else if (ism == HT_INFINITY)
        x = HT_INFINITY;
    else {
        x = (y >> ISM_SHIFT) * ism
            + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
    }
    return (x);
}

static inline u_int64_t
m2sm(u_int m)
{
    u_int64_t sm;

    sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
    return (sm);
}

static inline u_int64_t
m2ism(u_int m)
{
    u_int64_t ism;

    if (m == 0)
        ism = HT_INFINITY;
    else
        ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
    return (ism);
}

static inline u_int64_t
d2dx(u_int d)
{
    u_int64_t dx;

    dx = ((u_int64_t)d * machclk_freq) / 1000;
    return (dx);
}

static u_int
sm2m(u_int64_t sm)
{
    u_int64_t m;

    m = (sm * 8 * machclk_freq) >> SM_SHIFT;
    return ((u_int)m);
}

static u_int
dx2d(u_int64_t dx)
{
    u_int64_t d;

    d = dx * 1000 / machclk_freq;
    return ((u_int)d);
}
static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
    isc->sm1 = m2sm(sc->m1);
    isc->ism1 = m2ism(sc->m1);
    isc->dx = d2dx(sc->d);
    isc->dy = seg_x2y(isc->dx, isc->sm1);
    isc->sm2 = m2sm(sc->m2);
    isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
    rtsc->x = x;
    rtsc->y = y;
    rtsc->sm1 = isc->sm1;
    rtsc->ism1 = isc->ism1;
    rtsc->dx = isc->dx;
    rtsc->dy = isc->dy;
    rtsc->sm2 = isc->sm2;
    rtsc->ism2 = isc->ism2;
}

/*
 * calculate the y-projection of the runtime service curve by the
 * given x-projection value
 */
static u_int64_t
rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
{
    u_int64_t x;

    if (y < rtsc->y)
        x = rtsc->x;
    else if (y <= rtsc->y + rtsc->dy) {
        /* x belongs to the 1st segment */
        if (rtsc->dy == 0)
            x = rtsc->x + rtsc->dx;
        else
            x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
    } else {
        /* x belongs to the 2nd segment */
        x = rtsc->x + rtsc->dx
            + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
    }
    return (x);
}

static u_int64_t
rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
{
    u_int64_t y;

    if (x <= rtsc->x)
        y = rtsc->y;
    else if (x <= rtsc->x + rtsc->dx)
        /* y belongs to the 1st segment */
        y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
    else
        /* y belongs to the 2nd segment */
        y = rtsc->y + rtsc->dy
            + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
    return (y);
}
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
    u_int64_t y1, y2, dx, dy;

    if (isc->sm1 <= isc->sm2) {
        /* service curve is convex */
        y1 = rtsc_x2y(rtsc, x);
        if (y1 < y)
            /* the current rtsc is smaller */
            return;
        rtsc->x = x;
        rtsc->y = y;
        return;
    }

    /*
     * service curve is concave
     * compute the two y values of the current rtsc
     *	y1: at x
     *	y2: at (x + dx)
     */
    y1 = rtsc_x2y(rtsc, x);
    if (y1 <= y) {
        /* rtsc is below isc, no change to rtsc */
        return;
    }

    y2 = rtsc_x2y(rtsc, x + isc->dx);
    if (y2 >= y + isc->dy) {
        /* rtsc is above isc, replace rtsc by isc */
        rtsc->x = x;
        rtsc->y = y;
        rtsc->dx = isc->dx;
        rtsc->dy = isc->dy;
        return;
    }

    /*
     * the two curves intersect
     * compute the offsets (dx, dy) using the reverse
     * function of seg_x2y()
     *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
     */
    dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
    /*
     * check if (x, y1) belongs to the 1st segment of rtsc.
     * if so, add the offset.
     */
    if (rtsc->x + rtsc->dx > x)
        dx += rtsc->x + rtsc->dx - x;
    dy = seg_x2y(dx, isc->sm1);

    rtsc->x = x;
    rtsc->y = y;
    rtsc->dx = dx;
    rtsc->dy = dy;
    return;
}
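/*
 * Derivation of the dx formula above (an editor's addition): measuring
 * both curves from the intersection offset dx,
 *
 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
 *	(dx * sm1) >> SM_SHIFT == ((dx * sm2) >> SM_SHIFT) + (y1 - y)
 *	dx * (sm1 - sm2) == (y1 - y) << SM_SHIFT
 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 *
 * which is well defined here because the concave case guarantees
 * sm1 > sm2.
 */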
static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
    sp->class_id = cl->cl_id;
    sp->class_handle = cl->cl_handle;

    if (cl->cl_rsc != NULL) {
        sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
        sp->rsc.d = dx2d(cl->cl_rsc->dx);
        sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
    } else {
        sp->rsc.m1 = 0;
        sp->rsc.d = 0;
        sp->rsc.m2 = 0;
    }
    if (cl->cl_fsc != NULL) {
        sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
        sp->fsc.d = dx2d(cl->cl_fsc->dx);
        sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
    } else {
        sp->fsc.m1 = 0;
        sp->fsc.d = 0;
        sp->fsc.m2 = 0;
    }
    if (cl->cl_usc != NULL) {
        sp->usc.m1 = sm2m(cl->cl_usc->sm1);
        sp->usc.d = dx2d(cl->cl_usc->dx);
        sp->usc.m2 = sm2m(cl->cl_usc->sm2);
    } else {
        sp->usc.m1 = 0;
        sp->usc.d = 0;
        sp->usc.m2 = 0;
    }

    sp->total = cl->cl_total;
    sp->cumul = cl->cl_cumul;

    sp->d = cl->cl_d;
    sp->e = cl->cl_e;
    sp->vt = cl->cl_vt;
    sp->f = cl->cl_f;

    sp->initvt = cl->cl_initvt;
    sp->vtperiod = cl->cl_vtperiod;
    sp->parentperiod = cl->cl_parentperiod;
    sp->nactive = cl->cl_nactive;
    sp->vtoff = cl->cl_vtoff;
    sp->cvtmax = cl->cl_cvtmax;
    sp->myf = cl->cl_myf;
    sp->cfmin = cl->cl_cfmin;
    sp->cvtmin = cl->cl_cvtmin;
    sp->myfadj = cl->cl_myfadj;
    sp->vtadj = cl->cl_vtadj;

    sp->cur_time = read_machclk();
    sp->machclk_freq = machclk_freq;

    sp->qlength = qlen(cl->cl_q);
    sp->qlimit = qlimit(cl->cl_q);
    sp->xmit_cnt = cl->cl_stats.xmit_cnt;
    sp->drop_cnt = cl->cl_stats.drop_cnt;
    sp->period = cl->cl_stats.period;

    sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
    if (q_is_red(cl->cl_q))
        red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
    if (q_is_rio(cl->cl_q))
        rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}
/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
{
    int i;
    struct hfsc_class *cl;

    if (chandle == 0)
        return (NULL);
    /*
     * first, try optimistically the slot matching the lower bits of
     * the handle.  if it fails, do the linear table search.
     */
    i = chandle % HFSC_MAX_CLASSES;
    if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
        return (cl);
    for (i = 0; i < HFSC_MAX_CLASSES; i++)
        if ((cl = hif->hif_class_tbl[i]) != NULL &&
            cl->cl_handle == chandle)
            return (cl);
    return (NULL);
}
#ifdef ALTQ3_COMPAT
static struct hfsc_if *
hfsc_attach(struct ifaltq *ifq, u_int bandwidth)
{
    struct hfsc_if *hif;

    hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
    if (hif == NULL)
        return (NULL);

    hif->hif_eligible = ellist_alloc();
    if (hif->hif_eligible == NULL) {
        free(hif, M_DEVBUF);
        return (NULL);
    }

    hif->hif_ifq = ifq;

    /* add this state to the hfsc list */
    hif->hif_next = hif_list;
    hif_list = hif;

    return (hif);
}

static int
hfsc_detach(struct hfsc_if *hif)
{
    (void)hfsc_clear_interface(hif);
    (void)hfsc_class_destroy(hif->hif_rootclass);

    /* remove this interface from the hif list */
    if (hif_list == hif)
        hif_list = hif->hif_next;
    else {
        struct hfsc_if *h;

        for (h = hif_list; h != NULL; h = h->hif_next)
            if (h->hif_next == hif) {
                h->hif_next = hif->hif_next;
                break;
            }
    }

    ellist_destroy(hif->hif_eligible);

    free(hif, M_DEVBUF);

    return (0);
}
static int
hfsc_class_modify(struct hfsc_class *cl, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc)
{
    struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
    u_int64_t cur_time;
    int s;

    rsc_tmp = fsc_tmp = usc_tmp = NULL;
    if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
        cl->cl_rsc == NULL) {
        rsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
            M_WAITOK);
        if (rsc_tmp == NULL)
            return (ENOMEM);
    }
    if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
        cl->cl_fsc == NULL) {
        fsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
            M_WAITOK);
        if (fsc_tmp == NULL)
            return (ENOMEM);
    }
    if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
        cl->cl_usc == NULL) {
        usc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
            M_WAITOK);
        if (usc_tmp == NULL)
            return (ENOMEM);
    }

    cur_time = read_machclk();
    s = splnet();

    if (rsc != NULL) {
        if (rsc->m1 == 0 && rsc->m2 == 0) {
            if (cl->cl_rsc != NULL) {
                if (!qempty(cl->cl_q))
                    hfsc_purgeq(cl);
                free(cl->cl_rsc, M_DEVBUF);
                cl->cl_rsc = NULL;
            }
        } else {
            if (cl->cl_rsc == NULL)
                cl->cl_rsc = rsc_tmp;
            sc2isc(rsc, cl->cl_rsc);
            rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
                cl->cl_cumul);
            cl->cl_eligible = cl->cl_deadline;
            if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
                cl->cl_eligible.dx = 0;
                cl->cl_eligible.dy = 0;
            }
        }
    }

    if (fsc != NULL) {
        if (fsc->m1 == 0 && fsc->m2 == 0) {
            if (cl->cl_fsc != NULL) {
                if (!qempty(cl->cl_q))
                    hfsc_purgeq(cl);
                free(cl->cl_fsc, M_DEVBUF);
                cl->cl_fsc = NULL;
            }
        } else {
            if (cl->cl_fsc == NULL)
                cl->cl_fsc = fsc_tmp;
            sc2isc(fsc, cl->cl_fsc);
            rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
                cl->cl_total);
        }
    }

    if (usc != NULL) {
        if (usc->m1 == 0 && usc->m2 == 0) {
            if (cl->cl_usc != NULL) {
                free(cl->cl_usc, M_DEVBUF);
                cl->cl_usc = NULL;
            }
        } else {
            if (cl->cl_usc == NULL)
                cl->cl_usc = usc_tmp;
            sc2isc(usc, cl->cl_usc);
            rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
                cl->cl_total);
        }
    }

    if (!qempty(cl->cl_q)) {
        if (cl->cl_rsc != NULL)
            update_ed(cl, m_pktlen(qhead(cl->cl_q)));
        if (cl->cl_fsc != NULL)
            update_vf(cl, 0, cur_time);
        /* is this enough? */
    }

    splx(s);

    return (0);
}
/*
 * hfsc device interface
 */
int
hfscopen(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
    if (machclk_freq == 0)
        init_machclk();

    if (machclk_freq == 0) {
        printf("hfsc: no CPU clock available!\n");
        return (ENXIO);
    }

    /* everything will be done when the queueing scheme is attached. */
    return 0;
}

int
hfscclose(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
    struct hfsc_if *hif;
    int err, error = 0;

    while ((hif = hif_list) != NULL) {
        /* destroy all */
        if (ALTQ_IS_ENABLED(hif->hif_ifq))
            altq_disable(hif->hif_ifq);

        err = altq_detach(hif->hif_ifq);
        if (err == 0)
            err = hfsc_detach(hif);
        if (err != 0 && error == 0)
            error = err;
    }

    return error;
}
int
hfscioctl(dev_t dev, ioctlcmd_t cmd, void *addr, int flag,
    struct lwp *l)
{
    struct hfsc_if *hif;
    struct hfsc_interface *ifacep;
    int error = 0;

    /* check super-user privilege */
    switch (cmd) {
    case HFSC_GETSTATS:
        break;
    default:
#if (__FreeBSD_version > 400000)
        if ((error = suser(p)) != 0)
            return (error);
#else
        if ((error = kauth_authorize_network(l->l_cred,
            KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_HFSC, NULL,
            NULL, NULL)) != 0)
            return (error);
#endif
        break;
    }

    switch (cmd) {

    case HFSC_IF_ATTACH:
        error = hfsccmd_if_attach((struct hfsc_attach *)addr);
        break;

    case HFSC_IF_DETACH:
        error = hfsccmd_if_detach((struct hfsc_interface *)addr);
        break;

    case HFSC_ENABLE:
    case HFSC_DISABLE:
    case HFSC_CLEAR_HIERARCHY:
        ifacep = (struct hfsc_interface *)addr;
        if ((hif = altq_lookup(ifacep->hfsc_ifname,
            ALTQT_HFSC)) == NULL) {
            error = EBADF;
            break;
        }

        switch (cmd) {
        case HFSC_ENABLE:
            if (hif->hif_defaultclass == NULL) {
#ifdef ALTQ_DEBUG
                printf("hfsc: no default class\n");
#endif
                error = EINVAL;
                break;
            }
            error = altq_enable(hif->hif_ifq);
            break;

        case HFSC_DISABLE:
            error = altq_disable(hif->hif_ifq);
            break;

        case HFSC_CLEAR_HIERARCHY:
            hfsc_clear_interface(hif);
            break;
        }
        break;

    case HFSC_ADD_CLASS:
        error = hfsccmd_add_class((struct hfsc_add_class *)addr);
        break;

    case HFSC_DEL_CLASS:
        error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
        break;

    case HFSC_MOD_CLASS:
        error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
        break;

    case HFSC_ADD_FILTER:
        error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
        break;

    case HFSC_DEL_FILTER:
        error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
        break;

    case HFSC_GETSTATS:
        error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
        break;

    default:
        error = EINVAL;
        break;
    }

    return error;
}
static int
hfsccmd_if_attach(struct hfsc_attach *ap)
{
    struct hfsc_if *hif;
    struct ifnet *ifp;
    int error;

    if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
        return (ENXIO);

    if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
        return (ENOMEM);

    /*
     * set HFSC to this ifnet structure.
     */
    if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
        hfsc_enqueue, hfsc_dequeue, hfsc_request,
        &hif->hif_classifier, acc_classify)) != 0)
        (void)hfsc_detach(hif);

    return (error);
}

static int
hfsccmd_if_detach(struct hfsc_interface *ap)
{
    struct hfsc_if *hif;
    int error;

    if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    if (ALTQ_IS_ENABLED(hif->hif_ifq))
        altq_disable(hif->hif_ifq);

    if ((error = altq_detach(hif->hif_ifq)))
        return (error);

    return hfsc_detach(hif);
}

static int
hfsccmd_add_class(struct hfsc_add_class *ap)
{
    struct hfsc_if *hif;
    struct hfsc_class *cl, *parent;
    int i;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
        hif->hif_rootclass == NULL)
        parent = NULL;
    else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
        return (EINVAL);

    /* assign a class handle (use a free slot number for now) */
    for (i = 1; i < HFSC_MAX_CLASSES; i++)
        if (hif->hif_class_tbl[i] == NULL)
            break;
    if (i == HFSC_MAX_CLASSES)
        return (EBUSY);

    if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
        parent, ap->qlimit, ap->flags, i)) == NULL)
        return (ENOMEM);

    /* return a class handle to the user */
    ap->class_handle = i;

    return (0);
}

static int
hfsccmd_delete_class(struct hfsc_delete_class *ap)
{
    struct hfsc_if *hif;
    struct hfsc_class *cl;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
        return (EINVAL);

    return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(struct hfsc_modify_class *ap)
{
    struct hfsc_if *hif;
    struct hfsc_class *cl;
    struct service_curve *rsc = NULL;
    struct service_curve *fsc = NULL;
    struct service_curve *usc = NULL;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
        return (EINVAL);

    if (ap->sctype & HFSC_REALTIMESC)
        rsc = &ap->service_curve;
    if (ap->sctype & HFSC_LINKSHARINGSC)
        fsc = &ap->service_curve;
    if (ap->sctype & HFSC_UPPERLIMITSC)
        usc = &ap->service_curve;

    return hfsc_class_modify(cl, rsc, fsc, usc);
}

static int
hfsccmd_add_filter(struct hfsc_add_filter *ap)
{
    struct hfsc_if *hif;
    struct hfsc_class *cl;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
        return (EINVAL);

    if (is_a_parent_class(cl)) {
#ifdef ALTQ_DEBUG
        printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
        return (EINVAL);
    }

    return acc_add_filter(&hif->hif_classifier, &ap->filter,
        cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(struct hfsc_delete_filter *ap)
{
    struct hfsc_if *hif;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    return acc_delete_filter(&hif->hif_classifier,
        ap->filter_handle);
}
static int
hfsccmd_class_stats(struct hfsc_class_stats *ap)
{
    struct hfsc_if *hif;
    struct hfsc_class *cl;
    struct hfsc_classstats stats, *usp;
    int n, nclasses, error;

    if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
        return (EBADF);

    ap->cur_time = read_machclk();
    ap->machclk_freq = machclk_freq;
    ap->hif_classes = hif->hif_classes;
    ap->hif_packets = hif->hif_packets;

    /* skip the first N classes in the tree */
    nclasses = ap->nskip;
    for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
         cl = hfsc_nextclass(cl), n++)
        ;
    if (n != nclasses)
        return (EINVAL);

    /* then, read the next N classes in the tree */
    nclasses = ap->nclasses;
    usp = ap->stats;
    for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {

        get_class_stats(&stats, cl);

        if ((error = copyout((void *)&stats, (void *)usp++,
            sizeof(stats))) != 0)
            return (error);
    }

    ap->nclasses = n;

    return (0);
}
#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
    {"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */

#endif /* ALTQ_HFSC */