/*	$NetBSD: altq_jobs.c,v 1.4 2006/11/16 01:32:37 christos Exp $	*/
/*	$KAME: altq_jobs.c,v 1.11 2005/04/13 03:44:25 suz Exp $	*/

/*
 * Copyright (c) 2001, the Rector and Board of Visitors of the
 * University of Virginia.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms,
 * with or without modification, are permitted provided
 * that the following conditions are met:
 *
 * Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials provided
 * with the distribution.
 *
 * Neither the name of the University of Virginia nor the names
 * of its contributors may be used to endorse or promote products
 * derived from this software without specific prior written
 * permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * JoBS - altq prototype implementation
 *
 * Author: Nicolas Christin <nicolas@cs.virginia.edu>
 *
 * JoBS algorithms originally devised and proposed by
 * Nicolas Christin and Jorg Liebeherr.
 * Grateful acknowledgments to Tarek Abdelzaher for his help and
 * comments, and to Kenjiro Cho for some helpful advice.
 * Contributed by the Multimedia Networks Group at the University
 * of Virginia.
 *
 * Papers and additional info can be found at
 * http://qosbox.cs.virginia.edu
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_jobs.c,v 1.4 2006/11/16 01:32:37 christos Exp $");

#ifdef ALTQ_JOBS	/* jobs is enabled by ALTQ_JOBS option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>		/* needed for struct mbuf / m_pktlen() */
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* needed for struct proc in jobsioctl() */
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/kauth.h>

#include <sys/limits.h>

#include <net/if.h>		/* needed for ifunit() / struct ifnet */
#include <net/if_types.h>

#include <altq/altq.h>
#include <altq/altq_conf.h>
#include <altq/altq_jobs.h>
/*
 * function prototypes
 */
static struct jobs_if *jobs_attach(struct ifaltq *, u_int, u_int, u_int);
static int jobs_detach(struct jobs_if *);
static int jobs_clear_interface(struct jobs_if *);
static int jobs_request(struct ifaltq *, int, void *);
static void jobs_purge(struct jobs_if *);
static struct jobs_class *jobs_class_create(struct jobs_if *,
    int, int64_t, int64_t, int64_t, int64_t, int64_t, int);
static int jobs_class_destroy(struct jobs_class *);
static int jobs_enqueue(struct ifaltq *, struct mbuf *,
    struct altq_pktattr *);
static struct mbuf *jobs_dequeue(struct ifaltq *, int);

static int jobs_addq(struct jobs_class *, struct mbuf *, struct jobs_if *);
static struct mbuf *jobs_getq(struct jobs_class *);
static struct mbuf *jobs_pollq(struct jobs_class *);
static void jobs_purgeq(struct jobs_class *);

static int jobscmd_if_attach(struct jobs_attach *);
static int jobscmd_if_detach(struct jobs_interface *);
static int jobscmd_add_class(struct jobs_add_class *);
static int jobscmd_delete_class(struct jobs_delete_class *);
static int jobscmd_modify_class(struct jobs_modify_class *);
static int jobscmd_add_filter(struct jobs_add_filter *);
static int jobscmd_delete_filter(struct jobs_delete_filter *);
static int jobscmd_class_stats(struct jobs_class_stats *);
static void get_class_stats(struct class_stats *, struct jobs_class *);
static struct jobs_class *clh_to_clp(struct jobs_if *, u_long);
static u_long clp_to_clh(struct jobs_class *);

static TSLIST *tslist_alloc(void);
static void tslist_destroy(struct jobs_class *);
static int tslist_enqueue(struct jobs_class *, u_int64_t);
static void tslist_dequeue(struct jobs_class *);
static void tslist_drop(struct jobs_class *);

static int enforce_wc(struct jobs_if *);
static int64_t *adjust_rates_rdc(struct jobs_if *);
static int64_t *assign_rate_drops_adc(struct jobs_if *);
static int64_t *update_error(struct jobs_if *);
static int min_rates_adc(struct jobs_if *);
static int64_t proj_delay(struct jobs_if *, int);
static int pick_dropped_rlc(struct jobs_if *);

/* jif_list keeps all jobs_if's allocated. */
static struct jobs_if *jif_list = NULL;

typedef unsigned long long ull;
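
/*
 * A note on units, since the scaling helpers used throughout this
 * file live in altq_jobs.h: service rates are kept in a scaled
 * internal unit (bps_to_internal()/internal_to_bps() convert to and
 * from bits per second), loss rates are fixed-point fractions scaled
 * by 2^SCALE_LOSS, and buffer shares by 2^SCALE_SHARE.  For example,
 * dropping len bytes out of arrived bytes contributes roughly
 * (len << SCALE_LOSS)/arrived to current_loss, so a current_loss of
 * (1 << SCALE_LOSS) would mean 100% loss.
 */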
/* setup functions */

static struct jobs_if *
jobs_attach(struct ifaltq *ifq, u_int bandwidth, u_int qlimit, u_int separate)
{
	struct jobs_if *jif;

	jif = malloc(sizeof(struct jobs_if), M_DEVBUF, M_WAITOK|M_ZERO);
	if (jif == NULL)
		return (NULL);

	jif->jif_bandwidth = bandwidth;
	jif->jif_qlimit = qlimit;
	jif->jif_separate = separate;
	printf("JoBS bandwidth = %d bps\n", (int)bandwidth);
	printf("JoBS buffer size = %d pkts [%s]\n",
	    (int)qlimit, separate ? "separate buffers" : "shared buffer");
	jif->jif_maxpri = -1;
	jif->jif_ifq = ifq;

	jif->wc_cycles_enqueue = 0;
	jif->avg_cycles_enqueue = 0;
	jif->avg_cycles2_enqueue = 0;
	jif->bc_cycles_enqueue = INFINITY;
	jif->wc_cycles_dequeue = 0;
	jif->avg_cycles_dequeue = 0;
	jif->avg_cycles2_dequeue = 0;
	jif->bc_cycles_dequeue = INFINITY;
	jif->total_enqueued = 0;
	jif->total_dequeued = 0;

	/* add this state to the jobs list */
	jif->jif_next = jif_list;
	jif_list = jif;

	return (jif);
}
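
/*
 * The *_cycles_* fields initialized above are benchmarking
 * instrumentation only: wc_/bc_ track the worst and best-case number
 * of machine-clock cycles spent in enqueue/dequeue, while avg_cycles
 * and avg_cycles2 accumulate the first and second moments.  Userland
 * can then derive mean = avg_cycles/total and
 * variance = avg_cycles2/total - mean^2 from the class stats.
 */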
static int
jobs_detach(struct jobs_if *jif)
{
	struct jobs_if *p;

	(void)jobs_clear_interface(jif);

	/* remove this interface from the jif list */
	if (jif_list == jif)
		jif_list = jif->jif_next;
	else {
		for (p = jif_list; p != NULL; p = p->jif_next)
			if (p->jif_next == jif) {
				p->jif_next = jif->jif_next;
				break;
			}
	}
	free(jif, M_DEVBUF);
	return (0);
}
/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes.
 */
static int
jobs_clear_interface(struct jobs_if *jif)
{
	struct jobs_class *cl;
	int pri;

	/* free the filters for this interface */
	acc_discard_filters(&jif->jif_classifier, NULL, 1);

	/* clear out the classes */
	for (pri = 0; pri <= jif->jif_maxpri; pri++)
		if ((cl = jif->jif_classes[pri]) != NULL)
			jobs_class_destroy(cl);

	return (0);
}
static int
jobs_request(struct ifaltq *ifq, int req, void *arg)
{
	struct jobs_if *jif = (struct jobs_if *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		jobs_purge(jif);
		break;
	}
	return (0);
}
/* discard all the queued packets on the interface */
static void
jobs_purge(struct jobs_if *jif)
{
	struct jobs_class *cl;
	int pri;

	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		if ((cl = jif->jif_classes[pri]) != NULL && !qempty(cl->cl_q))
			jobs_purgeq(cl);
	}
	if (ALTQ_IS_ENABLED(jif->jif_ifq))
		jif->jif_ifq->ifq_len = 0;
}
static struct jobs_class *
jobs_class_create(struct jobs_if *jif, int pri, int64_t adc, int64_t rdc,
    int64_t alc, int64_t rlc, int64_t arc, int flags)
{
	struct jobs_class *cl, *scan1, *scan2;
	int i, j;
	int class_exists1, class_exists2;
	u_int64_t now;
	int64_t tmp[JOBS_MAXPRI];

	if ((cl = jif->jif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		if (!qempty(cl->cl_q))
			jobs_purgeq(cl);
	} else {
		cl = malloc(sizeof(struct jobs_class), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl == NULL)
			return (NULL);

		cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_q == NULL)
			goto err_ret;

		cl->arv_tm = tslist_alloc();
		if (cl->arv_tm == NULL)
			goto err_ret;
	}

	jif->jif_classes[pri] = cl;

	if (flags & JOCF_DEFAULTCLASS)
		jif->jif_default = cl;

	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->service_rate = 0;
	cl->min_rate_adc = 0;
	cl->current_loss = 0;
	PKTCNTR_RESET(&cl->cl_arrival);
	PKTCNTR_RESET(&cl->cl_rin);
	PKTCNTR_RESET(&cl->cl_rout);
	PKTCNTR_RESET(&cl->cl_rout_th);
	PKTCNTR_RESET(&cl->cl_dropcnt);
	PKTCNTR_RESET(&cl->st_arrival);
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);
	PKTCNTR_RESET(&cl->st_dropcnt);
	cl->st_service_rate = 0;
	cl->cl_lastdel = 0;
	cl->cl_avgdel = 0;
	cl->adc_violations = 0;

	if (adc == -1) {
		cl->concerned_adc = 0;
		adc = INFINITY;
	} else
		cl->concerned_adc = 1;

	if (alc == -1) {
		cl->concerned_alc = 0;
		alc = INFINITY;
	} else
		cl->concerned_alc = 1;

	if (rdc == -1) {
		rdc = 0;
		cl->concerned_rdc = 0;
	} else
		cl->concerned_rdc = 1;

	if (rlc == -1) {
		rlc = 0;
		cl->concerned_rlc = 0;
	} else
		cl->concerned_rlc = 1;

	if (arc == -1) {
		arc = 0;
		cl->concerned_arc = 0;
	} else
		cl->concerned_arc = 1;

	if (cl->concerned_adc) {
		/* adc is given in us, convert it to clock ticks */
		cl->cl_adc = (u_int64_t)(adc*machclk_freq/GRANULARITY);
	} else
		cl->cl_adc = adc;

	if (cl->concerned_arc) {
		/* arc is given in bps, convert it to internal unit */
		cl->cl_arc = (u_int64_t)(bps_to_internal(arc));
	} else
		cl->cl_arc = arc;

	cl->cl_rdc = rdc;
	cl->cl_rlc = rlc;
	cl->cl_alc = alc;
	cl->delay_prod_others = 0;
	cl->loss_prod_others = 0;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > jif->jif_maxpri)
		jif->jif_maxpri = pri;
	cl->cl_jif = jif;
	cl->cl_handle = (u_long)cl;	/* just a pointer to this class */

	/*
	 * update delay_prod_others and loss_prod_others
	 * in all classes if needed
	 */
	if (cl->concerned_rdc) {
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rdc)
						tmp[i] *= scan2->cl_rdc;
				}
			} else
				tmp[i] = 0;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->delay_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rdc)
						scan1->delay_prod_others *= tmp[j];
				}
			}
		}
	}

	if (cl->concerned_rlc) {
		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				tmp[i] = 1;
				for (j = 0; j <= i-1; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2
					    && scan2->concerned_rlc)
						tmp[i] *= scan2->cl_rlc;
				}
			} else
				tmp[i] = 0;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			scan1 = jif->jif_classes[i];
			class_exists1 = (scan1 != NULL);
			if (class_exists1) {
				scan1->loss_prod_others = 1;
				for (j = 0; j <= jif->jif_maxpri; j++) {
					scan2 = jif->jif_classes[j];
					class_exists2 = (scan2 != NULL);
					if (class_exists2 && j != i
					    && scan2->concerned_rlc)
						scan1->loss_prod_others *= tmp[j];
				}
			}
		}
	}

	now = read_machclk();
	cl->idletime = now;
	return (cl);

 err_ret:
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	if (cl->arv_tm != NULL)
		free(cl->arv_tm, M_DEVBUF);

	free(cl, M_DEVBUF);
	return (NULL);
}
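
/*
 * About the *_prod_others products recomputed above: each class
 * caches the product of the other classes' cumulative RDC (resp.
 * RLC) coefficients, so that the rate controller can compare
 * "weighted" delays and losses across classes by cross-multiplying
 * instead of dividing.  They must be refreshed here because adding
 * or modifying a single class changes every other class's product.
 */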
static int
jobs_class_destroy(struct jobs_class *cl)
{
	struct jobs_if *jif;
	int pri;

	tslist_destroy(cl);

	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_jif->jif_classifier, cl, 0);

	if (!qempty(cl->cl_q))
		jobs_purgeq(cl);

	jif = cl->cl_jif;
	jif->jif_classes[cl->cl_pri] = NULL;
	if (jif->jif_maxpri == cl->cl_pri) {
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (jif->jif_classes[pri] != NULL) {
				jif->jif_maxpri = pri;
				break;
			}
		if (pri < 0)
			jif->jif_maxpri = -1;
	}

	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (0);
}
/*
 * jobs_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
jobs_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct jobs_if *jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl, *scan;
	u_int64_t len;
	int return_flag;
	int pri;
	u_int64_t now;
	u_int64_t old_arv;
	int64_t *delta_rate;
	u_int64_t tstamp1, tstamp2, cycles; /* used for benchmarking only */

	jif->total_enqueued++;
	now = read_machclk();
	tstamp1 = now;

	return_flag = 0;

	/* proceed with packet enqueuing */

	if (IFQ_IS_EMPTY(ifq)) {
		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->cl_lastdel = 0;
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}
	}

	/* grab class set by classifier */
	if (pktattr == NULL || (cl = pktattr->pattr_class) == NULL)
		cl = jif->jif_default;

	len = (u_int64_t)m_pktlen(m);
	old_arv = cl->cl_arrival.bytes;
	PKTCNTR_ADD(&cl->cl_arrival, (int)len);
	PKTCNTR_ADD(&cl->cl_rin, (int)len);
	PKTCNTR_ADD(&cl->st_arrival, (int)len);
	PKTCNTR_ADD(&cl->st_rin, (int)len);

	if (cl->cl_arrival.bytes < old_arv) {
		/* deals w/ overflow */
		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
			scan = jif->jif_classes[pri];
			if (scan != NULL) {
				/*
				 * reset all quantities, except:
				 * average delay, number of violations
				 */
				PKTCNTR_RESET(&scan->cl_rin);
				PKTCNTR_RESET(&scan->cl_rout);
				PKTCNTR_RESET(&scan->cl_rout_th);
				PKTCNTR_RESET(&scan->cl_arrival);
				PKTCNTR_RESET(&scan->cl_dropcnt);
				scan->current_loss = 0;
				scan->service_rate = 0;
				scan->idletime = now;
				scan->cl_last_rate_update = now;
			}
		}

		PKTCNTR_ADD(&cl->cl_arrival, (int)len);
		PKTCNTR_ADD(&cl->cl_rin, (int)len);
	}

	if (cl->cl_arrival.bytes > cl->cl_rin.bytes)
		cl->current_loss =
		    ((cl->cl_arrival.bytes - cl->cl_rin.bytes) << SCALE_LOSS)
		    / cl->cl_arrival.bytes;
	else
		cl->current_loss = 0;

	/* for MDRR: update theoretical value of the output curve */

	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		scan = jif->jif_classes[pri];
		if (scan != NULL) {
			if (scan->cl_last_rate_update == scan->idletime
			    || scan->cl_last_rate_update == 0)
				scan->cl_last_rate_update = now; /* initial case */
			else
				scan->cl_rout_th.bytes +=
				    delay_diff(now, scan->cl_last_rate_update)
				    * scan->service_rate;

			/*
			 * we don't really care about packets here
			 * WARNING: rout_th is SCALED
			 * (b/c of the service rate)
			 * for precision, as opposed to rout.
			 */

			scan->cl_last_rate_update = now;
		}
	}

	if (jobs_addq(cl, m, jif) != 0)
		return_flag = ENOBUFS; /* signals there's a buffer overflow */
	else
		IFQ_INC_LEN(ifq);

	/* successfully queued. */

	enforce_wc(jif);

	if (!min_rates_adc(jif)) {
		delta_rate = assign_rate_drops_adc(jif);
		if (delta_rate != NULL) {
			for (pri = 0; pri <= jif->jif_maxpri; pri++)
				if ((cl = jif->jif_classes[pri]) != NULL &&
				    !qempty(cl->cl_q))
					cl->service_rate += delta_rate[pri];
			free(delta_rate, M_DEVBUF);
		}
	} else {
		delta_rate = adjust_rates_rdc(jif);

		if (delta_rate != NULL) {
			for (pri = 0; pri <= jif->jif_maxpri; pri++)
				if ((cl = jif->jif_classes[pri]) != NULL &&
				    !qempty(cl->cl_q))
					cl->service_rate += delta_rate[pri];
			free(delta_rate, M_DEVBUF);
		}
	}

	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_enqueue)
		jif->wc_cycles_enqueue = cycles;
	if (cycles < jif->bc_cycles_enqueue)
		jif->bc_cycles_enqueue = cycles;

	jif->avg_cycles_enqueue += cycles;
	jif->avg_cycles2_enqueue += cycles * cycles;

	return (return_flag);
}
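
/*
 * Order of operations in jobs_enqueue, for reference: (1) reset the
 * per-busy-period counters when a new busy period starts or when the
 * byte counters wrap, (2) advance each class's theoretical output
 * curve rout_th by service_rate times the elapsed time, (3) queue
 * the packet (jobs_addq may drop instead, signalled by ENOBUFS),
 * then (4) recompute service rates: enforce work conservation,
 * reallocate and drop for the absolute constraints if they cannot be
 * met, otherwise apply the proportional (RDC) adjustment.
 */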
/*
 * jobs_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
jobs_dequeue(struct ifaltq *ifq, int op)
{
	struct jobs_if *jif = (struct jobs_if *)ifq->altq_disc;
	struct jobs_class *cl;
	struct mbuf *m;
	int pri;
	int svc_class;
	int64_t max_error;
	int64_t error;
	u_int64_t now;
	u_int64_t tstamp1, tstamp2, cycles;

	jif->total_dequeued++;

	now = read_machclk();
	tstamp1 = now;

	if (IFQ_IS_EMPTY(ifq)) {
		/* no packet in the queue */
		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
			cl = jif->jif_classes[pri];
			if (cl != NULL)
				cl->idletime = now;
		}

		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (NULL);
	}

	/*
	 * select the class whose actual transmissions are the furthest
	 * from the promised transmissions
	 */
	max_error = -1;
	svc_class = -1;

	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		if (((cl = jif->jif_classes[pri]) != NULL)
		    && !qempty(cl->cl_q)) {
			error = (int64_t)cl->cl_rout_th.bytes
			    - (int64_t)scale_rate(cl->cl_rout.bytes);
			if (max_error == -1) {
				max_error = error;
				svc_class = pri;
			} else if (error > max_error) {
				max_error = error;
				svc_class = pri;
			}
		}
	}

	if (svc_class != -1)
		cl = jif->jif_classes[svc_class];
	else
		cl = NULL;

	if (op == ALTDQ_POLL) {
		tstamp2 = read_machclk();
		cycles = delay_diff(tstamp2, tstamp1);
		if (cycles > jif->wc_cycles_dequeue)
			jif->wc_cycles_dequeue = cycles;
		if (cycles < jif->bc_cycles_dequeue)
			jif->bc_cycles_dequeue = cycles;

		jif->avg_cycles_dequeue += cycles;
		jif->avg_cycles2_dequeue += cycles * cycles;

		return (jobs_pollq(cl));
	}

	if (cl != NULL)
		m = jobs_getq(cl);
	else
		m = NULL;

	if (m != NULL) {
		IFQ_DEC_LEN(ifq);
		if (qempty(cl->cl_q))
			cl->cl_period++;

		cl->cl_lastdel = (u_int64_t)delay_diff(now,
		    tslist_first(cl->arv_tm)->timestamp);
		if (cl->concerned_adc
		    && (int64_t)cl->cl_lastdel > cl->cl_adc)
			cl->adc_violations++;
		cl->cl_avgdel += ticks_to_secs(GRANULARITY*cl->cl_lastdel);

		PKTCNTR_ADD(&cl->cl_rout, m_pktlen(m));
		PKTCNTR_ADD(&cl->st_rout, m_pktlen(m));

		tslist_dequeue(cl);	/* dequeue the timestamp */
	}

	tstamp2 = read_machclk();
	cycles = delay_diff(tstamp2, tstamp1);
	if (cycles > jif->wc_cycles_dequeue)
		jif->wc_cycles_dequeue = cycles;
	if (cycles < jif->bc_cycles_dequeue)
		jif->bc_cycles_dequeue = cycles;

	jif->avg_cycles_dequeue += cycles;
	jif->avg_cycles2_dequeue += cycles * cycles;

	return (m);
}
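
/*
 * The selection rule above is MDRR-like: rout_th is the service a
 * class should have received at its assigned rate, rout what it
 * actually received, and the backlogged class with the largest
 * (scaled) deficit rout_th - rout is served next.  Because the scan
 * starts at priority 0 and only a strictly larger error wins, ties
 * fall to the higher-priority class.
 */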
static int
jobs_addq(struct jobs_class *cl, struct mbuf *m, struct jobs_if *jif)
{
	int victim;
	u_int64_t len;
	u_int64_t now;
	struct jobs_class *victim_class;

	victim = -1;
	victim_class = NULL;
	len = 0;

	now = read_machclk();

	if (jif->jif_separate && qlen(cl->cl_q) >= jif->jif_qlimit) {
		/*
		 * separate buffers: no guarantees on packet drops
		 * can be offered.
		 * thus we drop the incoming packet.
		 */
		len = (u_int64_t)m_pktlen(m);
		PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
		PKTCNTR_SUB(&cl->cl_rin, (int)len);
		PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
		PKTCNTR_SUB(&cl->st_rin, (int)len);
		cl->current_loss += (len << SCALE_LOSS)
		    / cl->cl_arrival.bytes;
		m_freem(m);
		return (-1);

	} else if (!jif->jif_separate
	    && jif->jif_ifq->ifq_len >= jif->jif_qlimit) {
		/* shared buffer: supports guarantees on losses */
		if (!cl->concerned_rlc) {
			if (!cl->concerned_alc) {
				/*
				 * no ALC, no RLC on this class:
				 * drop the incoming packet
				 */
				len = (u_int64_t)m_pktlen(m);
				PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
				PKTCNTR_SUB(&cl->cl_rin, (int)len);
				PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
				PKTCNTR_SUB(&cl->st_rin, (int)len);
				cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
				m_freem(m);
				return (-1);
			} else {
				/*
				 * no RLC, but an ALC:
				 * drop the incoming packet if possible
				 */
				len = (u_int64_t)m_pktlen(m);
				if (cl->current_loss + (len << SCALE_LOSS)
				    / cl->cl_arrival.bytes <= cl->cl_alc) {
					PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
					PKTCNTR_SUB(&cl->cl_rin, (int)len);
					PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
					PKTCNTR_SUB(&cl->st_rin, (int)len);
					cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
					m_freem(m);
					return (-1);
				} else {
					/*
					 * the ALC would be violated:
					 * queue the arrival and drop
					 * from another class instead
					 */
					_addq(cl->cl_q, m);
					tslist_enqueue(cl, now);

					victim = pick_dropped_rlc(jif);

					if (victim < 0 || victim > jif->jif_maxpri) {
						/*
						 * something went wrong:
						 * let us discard
						 * the incoming packet,
						 * regardless of what
						 * may happen...
						 */
						victim_class = cl;
					} else
						victim_class = jif->jif_classes[victim];

					if (victim_class != NULL) {
						/*
						 * test for safety
						 * purposes...
						 * it must be true
						 */
						m = _getq_tail(victim_class->cl_q);
						len = (u_int64_t)m_pktlen(m);
						PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
						PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
						PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
						PKTCNTR_SUB(&victim_class->st_rin, (int)len);
						victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
						m_freem(m); /* the packet is trashed here */
						tslist_drop(victim_class); /* and its timestamp as well */
					}
					return (-1);
				}
			}
		} else {
			/*
			 * RLC on this class: queue the arrival and
			 * pick class according to RLCs
			 */
			_addq(cl->cl_q, m);
			tslist_enqueue(cl, now);

			victim = pick_dropped_rlc(jif);
			if (victim < 0 || victim > jif->jif_maxpri) {
				/*
				 * something went wrong
				 * let us discard the incoming packet,
				 * regardless of what may happen...
				 */
				victim_class = cl;
			} else
				victim_class = jif->jif_classes[victim];

			if (victim_class != NULL) {
				/*
				 * test for safety purposes...
				 * it must be true
				 */
				m = _getq_tail(victim_class->cl_q);
				len = (u_int64_t)m_pktlen(m);
				PKTCNTR_ADD(&victim_class->cl_dropcnt, (int)len);
				PKTCNTR_SUB(&victim_class->cl_rin, (int)len);
				PKTCNTR_ADD(&victim_class->st_dropcnt, (int)len);
				PKTCNTR_SUB(&victim_class->st_rin, (int)len);
				victim_class->current_loss += (len << SCALE_LOSS)/victim_class->cl_arrival.bytes;
				m_freem(m); /* the packet is trashed here */
				tslist_drop(victim_class); /* and its timestamp as well */
			}
			return (-1);
		}
	}

	/* the buffer can absorb the packet: queue it */
	_addq(cl->cl_q, m);
	tslist_enqueue(cl, now);

	return (0);
}
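
/*
 * Drop policy implemented above, in summary:
 *  - separate buffers, class queue full: drop the arriving packet
 *    (per-class buffers carry no loss guarantees);
 *  - shared buffer full, neither RLC nor ALC: drop the arrival;
 *  - shared buffer full, ALC only: drop the arrival if that keeps
 *    current_loss within the ALC, otherwise queue it and sacrifice
 *    a packet from the class pick_dropped_rlc() designates;
 *  - shared buffer full, RLC: queue the arrival and let
 *    pick_dropped_rlc() choose the victim class.
 * Every drop path returns -1, so the caller reports ENOBUFS and
 * leaves ifq_len unchanged (a victim drop replaces the queued
 * arrival one for one).
 */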
static struct mbuf *
jobs_getq(struct jobs_class *cl)
{
	return _getq(cl->cl_q);
}

static struct mbuf *
jobs_pollq(struct jobs_class *cl)
{
	return qhead(cl->cl_q);
}
static void
jobs_purgeq(struct jobs_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
		PKTCNTR_ADD(&cl->st_dropcnt, m_pktlen(m));
		m_freem(m);
		tslist_drop(cl);
	}
	ASSERT(qlen(cl->cl_q) == 0);
}
/*
 * timestamp list support routines
 *
 * this implementation has been revamped and
 * now uses a TAILQ structure.
 * timestamp list holds class timestamps;
 * there is one timestamp list per class.
 */
static TSLIST *
tslist_alloc(void)
{
	TSLIST *list_init;

	list_init = malloc(sizeof(TSLIST), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(list_init);
	return (list_init);
}

static void
tslist_destroy(struct jobs_class *cl)
{
	while (tslist_first(cl->arv_tm) != NULL)
		tslist_dequeue(cl);

	free(cl->arv_tm, M_DEVBUF);
}

static int
tslist_enqueue(struct jobs_class *cl, u_int64_t arv)
{
	TSENTRY *pushed;

	pushed = malloc(sizeof(TSENTRY), M_DEVBUF, M_WAITOK);
	if (pushed == NULL)
		return (0);

	pushed->timestamp = arv;
	TAILQ_INSERT_TAIL(cl->arv_tm, pushed, ts_list);
	return (1);
}

static void
tslist_dequeue(struct jobs_class *cl)
{
	TSENTRY *popped;

	popped = tslist_first(cl->arv_tm);
	if (popped != NULL) {
		TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
		free(popped, M_DEVBUF);
	}
	return;
}

static void
tslist_drop(struct jobs_class *cl)
{
	TSENTRY *popped;

	popped = tslist_last(cl->arv_tm);
	if (popped != NULL) {
		TAILQ_REMOVE(cl->arv_tm, popped, ts_list);
		free(popped, M_DEVBUF);
	}
	return;
}
/*
 * rate allocation support routines
 */

/*
 * enforce_wc: enforce that backlogged classes have non-zero
 * service rate, and that non-backlogged classes have zero
 * service rate.
 */
static int
enforce_wc(struct jobs_if *jif)
{
	struct jobs_class *cl;

	int64_t active_classes;
	int pri;
	int is_backlogged, class_exists, updated;

	updated = 0;
	active_classes = 0;

	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		cl = jif->jif_classes[pri];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged)
			active_classes++;
		if ((is_backlogged && cl->service_rate <= 0)
		    || (class_exists
			&& !is_backlogged && cl->service_rate > 0))
			updated = 1;
	}

	if (updated) {
		for (pri = 0; pri <= jif->jif_maxpri; pri++) {
			cl = jif->jif_classes[pri];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists && !qempty(cl->cl_q));

			if (class_exists && !is_backlogged)
				cl->service_rate = 0;
			else if (is_backlogged)
				cl->service_rate = (int64_t)(bps_to_internal((u_int64_t)jif->jif_bandwidth)/active_classes);
		}
	}

	return (updated);
}
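
/*
 * enforce_wc() only re-seeds the allocation: whenever the set of
 * backlogged classes changes (a backlogged class has no rate, or an
 * idle class still holds one), every backlogged class restarts from
 * an equal 1/active_classes share of the link bandwidth, and the
 * ADC/RDC routines invoked afterwards refine that split.  The return
 * value simply reports whether such a reset took place.
 */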
/*
 * adjust_rates_rdc: compute the service rate adjustments needed to
 * realize the desired proportional delay differentiation.
 * essentially, the rate adjustment delta_rate = prop_control*error,
 * where error is the difference between the measured "weighted"
 * delay and the mean of the weighted delays. see paper for more
 * information.
 * prop_control has slightly changed since the INFOCOM paper,
 * this condition seems to provide better results.
 */
static int64_t *
adjust_rates_rdc(struct jobs_if *jif)
{
	int64_t *result;
	int64_t credit, available, lower_bound, upper_bound;
	int64_t bk;
	int i, j;
	int rdc_classes, active_classes;
	int class_exists, is_backlogged;
	struct jobs_class *cl;
	int64_t *error;
	int64_t prop_control;
	u_int64_t max_prod;
	u_int64_t min_share;
	u_int64_t max_avg_pkt_size;

	/*
	 * min_share is scaled
	 * to avoid dealing with doubles
	 */
	active_classes = 0;
	rdc_classes = 0;
	max_prod = 0;
	max_avg_pkt_size = 0;

	upper_bound = (int64_t)jif->jif_bandwidth;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			active_classes++;
			if (cl->concerned_rdc)
				rdc_classes++;
			else
				upper_bound -=
				    internal_to_bps(cl->service_rate);
		}
	}

	result = malloc((jif->jif_maxpri+1)*sizeof(int64_t),
	    M_DEVBUF, M_WAITOK);
	if (result == NULL)
		return NULL;

	for (i = 0; i <= jif->jif_maxpri; i++)
		result[i] = 0;

	if (upper_bound <= 0 || rdc_classes == 0)
		return result;

	bk = 0;
	min_share = ((u_int64_t)1 << SCALE_SHARE);

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_rdc)
			bk += cl->cl_rin.bytes;
	}

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged
		    && (cl->cl_rin.bytes << SCALE_SHARE)/bk < min_share)
			min_share = (cl->cl_rin.bytes << SCALE_SHARE)/bk;
		if (is_backlogged && cl->concerned_rdc
		    && cl->delay_prod_others > max_prod)
			max_prod = cl->delay_prod_others;

		if (is_backlogged && cl->concerned_rdc
		    && cl->cl_rin.bytes > max_avg_pkt_size*cl->cl_rin.packets)
			max_avg_pkt_size = (u_int64_t)((u_int)cl->cl_rin.bytes/(u_int)cl->cl_rin.packets);
	}

	error = update_error(jif);
	if (error == NULL)
		return NULL;

	prop_control = (upper_bound*upper_bound*min_share)
	    /(max_prod*(max_avg_pkt_size << 2));

	prop_control = bps_to_internal(ticks_to_secs(prop_control)); /* in BT-1 */

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_rdc) {
			result[i] = -prop_control*error[i]; /* in BT-1 */
			result[i] >>= (SCALE_SHARE);
		}
	}

	free(error, M_DEVBUF); /* we don't need these anymore */

	/* saturation */
	lower_bound = 0;
	credit = 0;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc)
			lower_bound += cl->min_rate_adc;
		/*
		 * note: if there's no ADC or ARC on cl,
		 * this is equal to zero, which is fine
		 */
	}

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc
		    && result[i] + cl->service_rate > upper_bound) {
			for (j = 0; j <= jif->jif_maxpri; j++) {
				cl = jif->jif_classes[j];
				class_exists = (cl != NULL);
				is_backlogged = (class_exists
				    && !qempty(cl->cl_q));
				if (is_backlogged && cl->concerned_rdc) {
					if (j == i)
						result[j] = upper_bound
						    - cl->service_rate
						    + cl->min_rate_adc
						    - lower_bound;
					else
						result[j] =
						    -cl->service_rate
						    + cl->min_rate_adc;
				}
			}
			return result;
		}

		cl = jif->jif_classes[i];
		/* do this again since it may have been modified */
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc
		    && result[i] + cl->service_rate < cl->min_rate_adc) {
			credit += cl->service_rate+result[i]
			    -cl->min_rate_adc;
			/* "credit" is in fact a negative number */
			result[i] = -cl->service_rate+cl->min_rate_adc;
		}
	}

	for (i = jif->jif_maxpri; (i >= 0 && credit < 0); i--) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc) {
			available = result[i]
			    + cl->service_rate-cl->min_rate_adc;
			if (available >= -credit) {
				result[i] += credit;
				credit = 0;
			} else {
				result[i] -= available;
				credit += available;
			}
		}
	}

	return result;
}
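
/*
 * The RDC adjustment above behaves as a proportional feedback
 * controller, roughly delta_rate[i] = -K * e[i], where e[i] is the
 * class's deviation from the mean weighted delay (see
 * update_error()) and the gain K (prop_control) is scaled by the
 * link bandwidth, the smallest backlog share and the largest weight
 * product so that one step cannot overshoot.  The passes after the
 * controller step are pure saturation: rates are clamped between
 * min_rate_adc and the residual link capacity, and any resulting
 * deficit ("credit") is recovered from the lowest-priority
 * backlogged RDC classes first.
 */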
/*
 * assign_rate_drops_adc: returns the adjustment needed to
 * the service rates to meet the absolute delay/rate constraints
 * (delay/throughput bounds) and drops traffic if need be.
 * see tech. report UVA/T.R. CS-2000-24/CS-2001-21 for more info.
 */
static int64_t *
assign_rate_drops_adc(struct jobs_if *jif)
{
	int64_t *result;
	int class_exists, is_backlogged;
	struct jobs_class *cl;

	int64_t *c, *n, *k;
	int64_t *available;

	int lowest, highest;
	int keep_going;
	int i;
	u_int64_t now, oldest_arv;
	int64_t remaining_time;
	struct mbuf *pkt;
	u_int64_t len;

	now = read_machclk();
	oldest_arv = now;

	result = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
	if (result == NULL)
		return NULL;
	c = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
	if (c == NULL)
		return NULL;
	n = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
	if (n == NULL)
		return NULL;
	k = malloc((jif->jif_maxpri+1)*sizeof(u_int64_t), M_DEVBUF, M_WAITOK);
	if (k == NULL)
		return NULL;
	available = malloc((jif->jif_maxpri+1)*sizeof(int64_t), M_DEVBUF, M_WAITOK);
	if (available == NULL)
		return NULL;

	for (i = 0; i <= jif->jif_maxpri; i++)
		result[i] = 0;

	keep_going = 1;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged) {
			if (cl->concerned_adc) {
				/*
				 * get the arrival time of the oldest
				 * class-i packet
				 */
				if (tslist_first(cl->arv_tm) == NULL)
					oldest_arv = now; /* NOTREACHED */
				else
					oldest_arv = (tslist_first(cl->arv_tm))->timestamp;

				n[i] = cl->service_rate;
				k[i] = scale_rate((int64_t)(cl->cl_rin.bytes - cl->cl_rout.bytes));

				remaining_time = cl->cl_adc
				    - (int64_t)delay_diff(now, oldest_arv);
				if (remaining_time > 0) {
					c[i] = remaining_time;
					/*
					 * c is the remaining time before
					 * the deadline is violated
					 * (in ticks)
					 */
					available[i] = n[i]-k[i]/c[i];
				} else {
					/*
					 * deadline has passed...
					 * we allocate the whole link
					 * capacity to hopefully
					 * solve the problem
					 */
					c[i] = 0;
					available[i] = -((int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth));
				}
				if (cl->concerned_arc) {
					/*
					 * there's an ARC in addition
					 * to the ADC
					 */
					if (n[i] - cl->cl_arc < available[i])
						available[i] = n[i] - cl->cl_arc;
				}
			} else if (cl->concerned_arc) {
				/*
				 * backlogged, concerned by ARC
				 * but not by ADC
				 */
				n[i] = cl->service_rate;
				available[i] = n[i] - cl->cl_arc;
			} else {
				/*
				 * backlogged but not concerned by ADC
				 * or ARC -> can give everything
				 */
				n[i] = cl->service_rate;
				available[i] = n[i];
			}
		} else {
			/* not backlogged */
			n[i] = 0;
			k[i] = 0;
			c[i] = 0;
			if (class_exists)
				available[i] = cl->service_rate;
			else
				available[i] = 0;
		}
	}

	/* step 1: adjust rates (greedy algorithm) */

	highest = 0;
	lowest = jif->jif_maxpri;

	while (highest < jif->jif_maxpri+1 && available[highest] >= 0)
		highest++; /* which is the highest class that needs more service? */
	while (lowest > 0 && available[lowest] <= 0)
		lowest--; /* which is the lowest class that needs less service? */

	while (highest != jif->jif_maxpri+1 && lowest != -1) {
		/* give the excess service from lowest to highest */
		if (available[lowest]+available[highest] > 0) {
			/*
			 * still some "credit" left
			 * give all that is needed by "highest"
			 */
			n[lowest] += available[highest];
			n[highest] -= available[highest];
			available[lowest] += available[highest];
			available[highest] = 0;

			while (highest < jif->jif_maxpri+1
			    && available[highest] >= 0)
				highest++; /* which is the highest class that needs more service now? */

		} else if (available[lowest]+available[highest] == 0) {
			/* no more credit left but it's fine */
			n[lowest] += available[highest];
			n[highest] -= available[highest];
			available[highest] = 0;
			available[lowest] = 0;

			while (highest < jif->jif_maxpri+1
			    && available[highest] >= 0)
				highest++; /* which is the highest class that needs more service? */
			while (lowest >= 0 && available[lowest] <= 0)
				lowest--; /* which is the lowest class that needs less service? */

		} else if (available[lowest]+available[highest] < 0) {
			/*
			 * no more credit left and we need to switch
			 * to another donor class
			 */
			n[lowest] -= available[lowest];
			n[highest] += available[lowest];
			available[highest] += available[lowest];
			available[lowest] = 0;

			while ((lowest >= 0)&&(available[lowest] <= 0))
				lowest--; /* which is the lowest class that needs less service? */
		}
	}

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			result[i] = n[i] - cl->service_rate;
		} else {
			if (class_exists)
				result[i] = - cl->service_rate;
			else
				result[i] = 0;
		}
	}

	/* step 2: adjust drops (for ADC) */

	if (highest != jif->jif_maxpri+1) {
		/* some class(es) still need(s) additional service */
		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
			    && !qempty(cl->cl_q));
			if (is_backlogged && available[i] < 0) {
				if (cl->concerned_adc) {
					k[i] = c[i]*n[i];
					while (keep_going && scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes)) > k[i]) {
						pkt = qtail(cl->cl_q);
						if (pkt != NULL) {
							/* "safeguard" test (a packet SHOULD be in there) */
							len = (u_int64_t)m_pktlen(pkt);
							/* access packet at the tail */
							if (cl->concerned_alc
							    && cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
								keep_going = 0; /* relax ADC in favor of ALC */
							} else {
								/* drop packet at the tail of the class-i queue, update values */
								pkt = _getq_tail(cl->cl_q);
								len = (u_int64_t)m_pktlen(pkt);
								PKTCNTR_ADD(&cl->cl_dropcnt, (int)len);
								PKTCNTR_SUB(&cl->cl_rin, (int)len);
								PKTCNTR_ADD(&cl->st_dropcnt, (int)len);
								PKTCNTR_SUB(&cl->st_rin, (int)len);
								cl->current_loss += (len << SCALE_LOSS)/cl->cl_arrival.bytes;
								m_freem(pkt); /* the packet is trashed here */
								tslist_drop(cl);
								IFQ_DEC_LEN(cl->cl_jif->jif_ifq);
							}
						} else
							keep_going = 0; /* NOTREACHED */
					}
					k[i] = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes));
				}
				/*
				 * n[i] is the max rate we can give.
				 * the above drops as much as possible
				 * to respect a delay bound.
				 * for throughput bounds,
				 * there's nothing that can be done
				 * after the greedy reallocation.
				 */
			}
		}
	}

	/* update the values of min_rate_adc */
	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_adc) {
			if (c[i] != 0) {
				if (cl->concerned_adc
				    && !cl->concerned_arc)
					cl->min_rate_adc = k[i]/c[i];
				else
					cl->min_rate_adc = n[i];
			} else
				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
		} else if (is_backlogged && cl->concerned_arc)
			cl->min_rate_adc = n[i]; /* the best we can give */
		else if (class_exists)
			cl->min_rate_adc = 0;
	}

	free(c, M_DEVBUF);
	free(n, M_DEVBUF);
	free(k, M_DEVBUF);
	free(available, M_DEVBUF);

	return (result);
}
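
/*
 * In assign_rate_drops_adc(), for each backlogged class i: n[i] is
 * its candidate rate, k[i] its (scaled) backlog, and c[i] the time
 * left before the delay bound of its oldest packet is violated, so
 * k[i]/c[i] is the minimum rate that still meets the deadline and
 * available[i] = n[i] - k[i]/c[i] the surplus (negative when the
 * class needs more).  Step 1 greedily transfers surplus from the
 * lowest-priority donors to the highest-priority needy classes;
 * step 2 drops from the tail of any class that still cannot meet
 * its ADC, unless doing so would break its ALC.
 */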
/*
 * update_error: returns the difference between the mean weighted
 * delay and the weighted delay for each class. if proportional
 * delay differentiation is perfectly achieved, it should return
 * zero for each class.
 */
static int64_t *
update_error(struct jobs_if *jif)
{
	int i;
	int active_classes, backlogged_classes;
	u_int64_t mean_weighted_delay;
	u_int64_t delays[JOBS_MAXPRI];
	int64_t *error;
	int class_exists, is_backlogged;
	struct jobs_class *cl;

	error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1), M_DEVBUF,
	    M_WAITOK|M_ZERO);

	if (error == NULL)
		return NULL;

	mean_weighted_delay = 0;
	active_classes = 0;
	backlogged_classes = 0;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged) {
			backlogged_classes++;
			if (cl->concerned_rdc) {
				delays[i] = proj_delay(jif, i);
				mean_weighted_delay += cl->delay_prod_others*delays[i];
				active_classes++;
			}
		}
	}

	if (active_classes == 0)
		return error;
	else
		mean_weighted_delay /= active_classes;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));

		if (is_backlogged && cl->concerned_rdc)
			error[i] = ((int64_t)mean_weighted_delay)-((int64_t)cl->delay_prod_others*delays[i]);
		else
			error[i] = 0;
			/*
			 * either the class isn't concerned,
			 * or it's not backlogged.
			 * in any case, the rate shouldn't
			 * be adjusted.
			 */
	}
	return error;
}
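
/*
 * A small example for update_error(): with two backlogged RDC
 * classes whose weighted delays (delay_prod_others[i]*delays[i]) are
 * 6 and 10, the mean is 8 and the errors are +2 and -2.  Since the
 * controller applies -prop_control*error, the class lagging at 10
 * gains rate and the one leading at 6 loses some, pulling the
 * weighted delays together; perfect proportional differentiation
 * makes every error zero.
 */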
/*
 * min_rates_adc: computes the minimum service rates needed in
 * each class to meet the absolute delay bounds. if, for any
 * class i, the current service rate of class i is less than
 * the computed minimum service rate, this function returns
 * false, true otherwise.
 */
static int
min_rates_adc(struct jobs_if *jif)
{
	int result;
	int i;
	int class_exists, is_backlogged;
	int64_t remaining_time;
	struct jobs_class *cl;

	result = 1;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged && cl->concerned_adc) {
			remaining_time = cl->cl_adc - proj_delay(jif, i);
			if (remaining_time > 0) {
				/* min rate needed for ADC */
				cl->min_rate_adc = scale_rate((int64_t)(cl->cl_rin.bytes-cl->cl_rout.bytes))/remaining_time;
				if (cl->concerned_arc
				    && cl->cl_arc > cl->min_rate_adc) {
					/* min rate needed for ADC + ARC */
					cl->min_rate_adc = cl->cl_arc;
				}
			} else {
				/* the deadline has been exceeded: give the whole link capacity to hopefully fix the situation */
				cl->min_rate_adc = (int64_t)bps_to_internal((u_int64_t)jif->jif_bandwidth);
			}
		} else if (is_backlogged && cl->concerned_arc)
			cl->min_rate_adc = cl->cl_arc;	/* no ADC, an ARC */
		else if (class_exists)
			cl->min_rate_adc = 0;
			/*
			 * either the class is not backlogged,
			 * or there is no ADC and no ARC
			 */
		if (is_backlogged && cl->min_rate_adc > cl->service_rate)
			result = 0;
	}

	return result;
}
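
/*
 * The arithmetic in min_rates_adc() is deadline scheduling: a class
 * with Q bytes of (scaled) backlog and T ticks left before its
 * oldest packet violates the delay bound needs at least Q/T of
 * service rate, raised to cl_arc when a throughput bound is also
 * configured.  A single class below its minimum makes the function
 * return 0, which triggers the reallocation in
 * assign_rate_drops_adc().
 */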
/*
 * proj_delay: computes the difference between the current time
 * and the time the oldest class-i packet still in the class-i
 * queue arrived in the system.
 */
static int64_t
proj_delay(struct jobs_if *jif, int i)
{
	u_int64_t now;
	int class_exists, is_backlogged;
	struct jobs_class *cl;

	now = read_machclk();
	cl = jif->jif_classes[i];
	class_exists = (cl != NULL);
	is_backlogged = (class_exists && !qempty(cl->cl_q));

	if (is_backlogged)
		return ((int64_t)delay_diff(now, tslist_first(cl->arv_tm)->timestamp));

	return (0); /* NOTREACHED */
}
/*
 * pick_dropped_rlc: returns the class index of the class to be
 * dropped for meeting the relative loss constraints.
 */
static int
pick_dropped_rlc(struct jobs_if *jif)
{
	int64_t mean;
	int64_t *loss_error;
	int i, active_classes, backlogged_classes;
	int class_exists, is_backlogged;
	int class_dropped;
	int64_t max_error;
	int64_t max_alc;
	struct mbuf *pkt;
	struct jobs_class *cl;
	u_int64_t len;

	loss_error = malloc(sizeof(int64_t)*(jif->jif_maxpri+1),
	    M_DEVBUF, M_WAITOK);

	if (loss_error == NULL)
		return -1;

	class_dropped = -1;
	max_error = 0;
	mean = 0;
	active_classes = 0;
	backlogged_classes = 0;

	for (i = 0; i <= jif->jif_maxpri; i++) {
		cl = jif->jif_classes[i];
		class_exists = (cl != NULL);
		is_backlogged = (class_exists && !qempty(cl->cl_q));
		if (is_backlogged) {
			backlogged_classes++;
			if (cl->concerned_rlc) {
				mean += cl->loss_prod_others
				    * cl->current_loss;
				active_classes++;
			}
		}
	}

	if (active_classes > 0)
		mean /= active_classes;

	if (active_classes == 0)
		class_dropped = JOBS_MAXPRI+1; /*
						* no classes are concerned
						* by RLCs (JOBS_MAXPRI+1
						* means "ignore RLC" here)
						*/
	else {
		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
			    && !qempty(cl->cl_q));

			if ((is_backlogged)&&(cl->cl_rlc))
				loss_error[i] = cl->loss_prod_others
				    *cl->current_loss-mean;
			else
				loss_error[i] = INFINITY;
		}

		for (i = 0; i <= jif->jif_maxpri; i++) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
			    && !qempty(cl->cl_q));
			if (is_backlogged && loss_error[i] <= max_error) {
				/*
				 * find out which class is the most
				 * below the mean.
				 * it's the one that needs to be dropped
				 * ties are broken in favor of the higher
				 * priority classes (i.e., if two classes
				 * present the same deviation, the lower
				 * priority class will get dropped).
				 */
				max_error = loss_error[i];
				class_dropped = i;
			}
		}

		if (class_dropped != -1) {
			cl = jif->jif_classes[class_dropped];
			pkt = qtail(cl->cl_q);
			if (pkt != NULL) {
				/*
				 * "safeguard" test (a packet SHOULD be
				 * in there)
				 */
				len = (u_int64_t)m_pktlen(pkt);
				/* access packet at the tail */
				if (cl->current_loss+(len << SCALE_LOSS)/cl->cl_arrival.bytes > cl->cl_alc) {
					/*
					 * the class to drop for meeting
					 * the RLC will defeat the ALC:
					 * ignore RLC.
					 */
					class_dropped = JOBS_MAXPRI+1;
				}
			} else
				class_dropped = JOBS_MAXPRI+1; /* NOTREACHED */
		} else
			class_dropped = JOBS_MAXPRI+1;
	}

	if (class_dropped == JOBS_MAXPRI+1) {
		max_alc = -((int64_t)1 << SCALE_LOSS);
		for (i = jif->jif_maxpri; i >= 0; i--) {
			cl = jif->jif_classes[i];
			class_exists = (cl != NULL);
			is_backlogged = (class_exists
			    && !qempty(cl->cl_q));
			if (is_backlogged) {
				if (cl->concerned_alc && cl->cl_alc - cl->current_loss > max_alc) {
					max_alc = cl->cl_alc-cl->current_loss; /* pick the class which is the furthest from its ALC */
					class_dropped = i;
				} else if (!cl->concerned_alc && ((int64_t)1 << SCALE_LOSS)-cl->current_loss > max_alc) {
					max_alc = ((int64_t)1 << SCALE_LOSS)-cl->current_loss;
					class_dropped = i;
				}
			}
		}
	}

	free(loss_error, M_DEVBUF);
	return (class_dropped);
}
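
/*
 * Victim selection order in pick_dropped_rlc(), in short: first try
 * the backlogged RLC class furthest below the mean weighted loss,
 * since dropping from it moves the loss ratios toward the RLC
 * targets; if that drop would violate the class's ALC, or no class
 * is bound by an RLC at all, fall back to the backlogged class with
 * the most ALC headroom.  JOBS_MAXPRI+1 serves internally as the
 * "ignore RLC" marker while deciding on that fallback.
 */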
/*
 * ALTQ binding/setup functions
 */

#ifdef ALTQ3_COMPAT
/*
 * jobs device interface
 */

int
jobsopen(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("jobs: no CPU clock available!\n");
		return (ENXIO);
	}
	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
jobsclose(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
	struct jobs_if *jif;
	int err, error = 0;

	while ((jif = jif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(jif->jif_ifq))
			altq_disable(jif->jif_ifq);

		err = altq_detach(jif->jif_ifq);
		if (err == 0)
			err = jobs_detach(jif);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}

int
jobsioctl(dev_t dev, ioctlcmd_t cmd, void *addr, int flag,
    struct lwp *l)
{
	struct jobs_if *jif;
	struct jobs_interface *ifacep;
	struct proc *p = l->l_proc;
	int error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case JOBS_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_network(p->p_cred,
		    KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_JOBS, NULL,
		    NULL, NULL)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case JOBS_IF_ATTACH:
		error = jobscmd_if_attach((struct jobs_attach *)addr);
		break;

	case JOBS_IF_DETACH:
		error = jobscmd_if_detach((struct jobs_interface *)addr);
		break;

	case JOBS_ENABLE:
	case JOBS_DISABLE:
	case JOBS_CLEAR:
		ifacep = (struct jobs_interface *)addr;
		if ((jif = altq_lookup(ifacep->jobs_ifname,
		    ALTQT_JOBS)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {
		case JOBS_ENABLE:
			if (jif->jif_default == NULL) {
#if 1
				printf("jobs: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(jif->jif_ifq);
			break;

		case JOBS_DISABLE:
			error = altq_disable(jif->jif_ifq);
			break;

		case JOBS_CLEAR:
			jobs_clear_interface(jif);
			break;
		}
		break;

	case JOBS_ADD_CLASS:
		error = jobscmd_add_class((struct jobs_add_class *)addr);
		break;

	case JOBS_DEL_CLASS:
		error = jobscmd_delete_class((struct jobs_delete_class *)addr);
		break;

	case JOBS_MOD_CLASS:
		error = jobscmd_modify_class((struct jobs_modify_class *)addr);
		break;

	case JOBS_ADD_FILTER:
		error = jobscmd_add_filter((struct jobs_add_filter *)addr);
		break;

	case JOBS_DEL_FILTER:
		error = jobscmd_delete_filter((struct jobs_delete_filter *)addr);
		break;

	case JOBS_GETSTATS:
		error = jobscmd_class_stats((struct jobs_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}

	return error;
}
static int
jobscmd_if_attach(struct jobs_attach *ap)
{
	struct jobs_if *jif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.jobs_ifname)) == NULL)
		return (ENXIO);
	if ((jif = jobs_attach(&ifp->if_snd, ap->bandwidth, ap->qlimit, ap->separate)) == NULL)
		return (ENOMEM);

	/*
	 * set JOBS to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_JOBS, jif,
	    jobs_enqueue, jobs_dequeue, jobs_request,
	    &jif->jif_classifier, acc_classify)) != 0)
		(void)jobs_detach(jif);

	return (error);
}
static int
jobscmd_if_detach(struct jobs_interface *ap)
{
	struct jobs_if *jif;
	int error;

	if ((jif = altq_lookup(ap->jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(jif->jif_ifq))
		altq_disable(jif->jif_ifq);

	if ((error = altq_detach(jif->jif_ifq)))
		return (error);

	return jobs_detach(jif);
}
static int
jobscmd_add_class(struct jobs_add_class *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
		return (EINVAL);

	if ((cl = jobs_class_create(jif, ap->pri,
	    ap->cl_adc, ap->cl_rdc,
	    ap->cl_alc, ap->cl_rlc, ap->cl_arc,
	    ap->flags)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = clp_to_clh(cl);
	return (0);
}
static int
jobscmd_delete_class(struct jobs_delete_class *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return (EINVAL);

	return jobs_class_destroy(cl);
}
static int
jobscmd_modify_class(struct jobs_modify_class *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if (ap->pri < 0 || ap->pri >= JOBS_MAXPRI)
		return (EINVAL);

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return (EINVAL);

	/*
	 * if priority is changed, move the class to the new priority
	 */
	if (jif->jif_classes[ap->pri] != cl) {
		if (jif->jif_classes[ap->pri] != NULL)
			return (EEXIST);
		jif->jif_classes[cl->cl_pri] = NULL;
		jif->jif_classes[ap->pri] = cl;
		cl->cl_pri = ap->pri;
	}

	/* call jobs_class_create to change class parameters */
	if ((cl = jobs_class_create(jif, ap->pri,
	    ap->cl_adc, ap->cl_rdc,
	    ap->cl_alc, ap->cl_rlc, ap->cl_arc,
	    ap->flags)) == NULL)
		return (ENOMEM);
	return 0;
}
static int
jobscmd_add_filter(struct jobs_add_filter *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(jif, ap->class_handle)) == NULL)
		return (EINVAL);

	return acc_add_filter(&jif->jif_classifier, &ap->filter,
	    cl, &ap->filter_handle);
}
static int
jobscmd_delete_filter(struct jobs_delete_filter *ap)
{
	struct jobs_if *jif;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	return acc_delete_filter(&jif->jif_classifier, ap->filter_handle);
}
static int
jobscmd_class_stats(struct jobs_class_stats *ap)
{
	struct jobs_if *jif;
	struct jobs_class *cl;
	struct class_stats stats, *usp;
	int pri, error;

	if ((jif = altq_lookup(ap->iface.jobs_ifname, ALTQT_JOBS)) == NULL)
		return (EBADF);

	ap->maxpri = jif->jif_maxpri;

	/* then, read the next N classes */
	usp = ap->stats;
	for (pri = 0; pri <= jif->jif_maxpri; pri++) {
		cl = jif->jif_classes[pri];
		if (cl != NULL)
			get_class_stats(&stats, cl);
		else
			(void)memset(&stats, 0, sizeof(stats));
		if ((error = copyout((void *)&stats, (void *)usp++,
		    sizeof(stats))) != 0)
			return (error);
	}
	return (0);
}
static void
get_class_stats(struct class_stats *sp, struct jobs_class *cl)
{
	u_int64_t now;

	now = read_machclk();

	sp->class_handle = clp_to_clh(cl);
	sp->qlength = qlen(cl->cl_q);

	sp->period = cl->cl_period;
	sp->rin = cl->st_rin;
	sp->arrival = cl->st_arrival;
	sp->arrivalbusy = cl->cl_arrival;
	sp->rout = cl->st_rout;
	sp->dropcnt = cl->cl_dropcnt;

	/*  PKTCNTR_RESET(&cl->st_arrival);*/
	PKTCNTR_RESET(&cl->st_rin);
	PKTCNTR_RESET(&cl->st_rout);

	sp->totallength = cl->cl_jif->jif_ifq->ifq_len;
	sp->lastdel = ticks_to_secs(GRANULARITY*cl->cl_lastdel);
	sp->avgdel = cl->cl_avgdel;

	cl->cl_avgdel = 0;

	sp->busylength = ticks_to_secs(1000*delay_diff(now, cl->idletime));
	sp->adc_violations = cl->adc_violations;

	sp->wc_cycles_enqueue = cl->cl_jif->wc_cycles_enqueue;
	sp->wc_cycles_dequeue = cl->cl_jif->wc_cycles_dequeue;
	sp->bc_cycles_enqueue = cl->cl_jif->bc_cycles_enqueue;
	sp->bc_cycles_dequeue = cl->cl_jif->bc_cycles_dequeue;
	sp->avg_cycles_enqueue = cl->cl_jif->avg_cycles_enqueue;
	sp->avg_cycles_dequeue = cl->cl_jif->avg_cycles_dequeue;
	sp->avg_cycles2_enqueue = cl->cl_jif->avg_cycles2_enqueue;
	sp->avg_cycles2_dequeue = cl->cl_jif->avg_cycles2_dequeue;
	sp->total_enqueued = cl->cl_jif->total_enqueued;
	sp->total_dequeued = cl->cl_jif->total_dequeued;
}
/* convert a class handle to the corresponding class pointer */
static struct jobs_class *
clh_to_clp(struct jobs_if *jif, u_long chandle)
{
	struct jobs_class *cl;

	cl = (struct jobs_class *)chandle;
	if (chandle != ALIGN(cl)) {
#if 1
		printf("clh_to_clp: unaligned pointer %p\n", cl);
#endif
		return (NULL);
	}

	if (cl == NULL || cl->cl_handle != chandle || cl->cl_jif != jif)
		return (NULL);
	return (cl);
}
/* convert a class pointer to the corresponding class handle */
static u_long
clp_to_clh(struct jobs_class *cl)
{
	return (cl->cl_handle);
}
#ifdef KLD_MODULE

static struct altqsw jobs_sw =
	{"jobs", jobsopen, jobsclose, jobsioctl};

ALTQ_MODULE(altq_jobs, ALTQT_JOBS, &jobs_sw);

#endif /* KLD_MODULE */

#endif /* ALTQ3_COMPAT */
#endif /* ALTQ_JOBS */