/*
 * Quagga Work Queue Support.
 *
 * Copyright (C) 2005 Sun Microsystems, Inc.
 *
 * This file is part of GNU Zebra.
 *
 * Quagga is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * Quagga is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Quagga; see the file COPYING.  If not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
 * 02111-1307, USA.
 */
#include <lib/zebra.h>

#include "thread.h"
#include "memory.h"
#include "workqueue.h"
#include "linklist.h"
#include "command.h"
#include "log.h"
/* master list of work_queues */
static struct list work_queues;

#define WORK_QUEUE_MIN_GRANULARITY 1
static struct work_queue_item *
work_queue_item_new (struct work_queue *wq)
{
  struct work_queue_item *item;
  assert (wq);

  item = XCALLOC (MTYPE_WORK_QUEUE_ITEM,
                  sizeof (struct work_queue_item));

  return item;
}

static void
work_queue_item_free (struct work_queue_item *item)
{
  XFREE (MTYPE_WORK_QUEUE_ITEM, item);
  return;
}
/* create new work queue */
struct work_queue *
work_queue_new (struct thread_master *m, const char *queue_name)
{
  struct work_queue *new;

  new = XCALLOC (MTYPE_WORK_QUEUE, sizeof (struct work_queue));

  if (new == NULL)
    return new;

  new->name = XSTRDUP (MTYPE_WORK_QUEUE_NAME, queue_name);
  new->master = m;
  SET_FLAG (new->flags, WQ_UNPLUGGED);

  if ( (new->items = list_new ()) == NULL)
    {
      XFREE (MTYPE_WORK_QUEUE_NAME, new->name);
      XFREE (MTYPE_WORK_QUEUE, new);

      return NULL;
    }

  new->items->del = (void (*)(void *)) work_queue_item_free;

  listnode_add (&work_queues, new);

  new->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;

  /* Default values, can be overridden by caller */
  new->spec.hold = WORK_QUEUE_DEFAULT_HOLD;

  return new;
}
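/* A sketch of typical caller setup (the callback names here are
 * hypothetical, not part of this file): after work_queue_new, the caller
 * fills in wq->spec before adding any items, e.g.
 *
 *   struct work_queue *wq = work_queue_new (master, "example queue");
 *   wq->spec.workfunc = example_workfunc;         (required)
 *   wq->spec.del_item_data = example_item_free;   (optional)
 *   wq->spec.max_retries = 3;
 *   wq->spec.hold = 500;                          (overrides the default)
 */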
void
work_queue_free (struct work_queue *wq)
{
  if (wq->thread != NULL)
    thread_cancel (wq->thread);

  /* list_delete frees items via callback */
  list_delete (wq->items);
  listnode_delete (&work_queues, wq);

  XFREE (MTYPE_WORK_QUEUE_NAME, wq->name);
  XFREE (MTYPE_WORK_QUEUE, wq);
  return;
}
static int
work_queue_schedule (struct work_queue *wq, unsigned int delay)
{
  /* if appropriate, schedule work queue thread */
  if ( CHECK_FLAG (wq->flags, WQ_UNPLUGGED)
       && (wq->thread == NULL)
       && (listcount (wq->items) > 0) )
    {
      wq->thread = thread_add_background (wq->master, work_queue_run,
                                          wq, delay);
      return 1;
    }
  else
    return 0;
}
void
work_queue_add (struct work_queue *wq, void *data)
{
  struct work_queue_item *item;

  assert (wq);

  if (!(item = work_queue_item_new (wq)))
    {
      zlog_err ("%s: unable to get new queue item", __func__);
      return;
    }

  item->data = data;
  listnode_add (wq->items, item);

  work_queue_schedule (wq, wq->spec.hold);

  return;
}
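/* Producers then hand opaque data pointers to the queue, e.g. with a
 * hypothetical item:
 *
 *   work_queue_add (wq, example_data);
 *
 * The item is processed later, from the background thread, once the
 * spec.hold delay has expired.
 */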
static void
work_queue_item_remove (struct work_queue *wq, struct listnode *ln)
{
  struct work_queue_item *item = listgetdata (ln);

  assert (item && item->data);

  /* call private data deletion callback if needed */
  if (wq->spec.del_item_data)
    wq->spec.del_item_data (wq, item->data);

  list_delete_node (wq->items, ln);
  work_queue_item_free (item);

  return;
}
static void
work_queue_item_requeue (struct work_queue *wq, struct listnode *ln)
{
  LISTNODE_DETACH (wq->items, ln);
  LISTNODE_ATTACH (wq->items, ln); /* attach to end of list */
}
DEFUN(show_work_queues,
      show_work_queues_cmd,
      "show work-queues",
      SHOW_STR
      "Work Queue information\n")
{
  struct listnode *node;
  struct work_queue *wq;

  vty_out (vty,
           "%c %8s %5s %8s %21s%s",
           ' ', "List","(ms) ","Q. Runs","Cycle Counts   ",
           VTY_NEWLINE);
  vty_out (vty,
           "%c %8s %5s %8s %7s %6s %6s %s%s",
           'P',
           "Items",
           "Hold",
           "Total",
           "Best","Gran.","Avg.",
           "Name",
           VTY_NEWLINE);

  for (ALL_LIST_ELEMENTS_RO ((&work_queues), node, wq))
    {
      vty_out (vty,"%c %8d %5d %8ld %7d %6d %6u %s%s",
               (CHECK_FLAG (wq->flags, WQ_UNPLUGGED) ? ' ' : 'P'),
               listcount (wq->items),
               wq->spec.hold,
               wq->runs,
               wq->cycles.best, wq->cycles.granularity,
               (wq->runs) ?
                 (unsigned int) (wq->cycles.total / wq->runs) : 0,
               wq->name,
               VTY_NEWLINE);
    }

  return CMD_SUCCESS;
}
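/* Illustrative output (all values invented for the example):
 *
 *      List (ms)  Q. Runs  Cycle Counts
 *   P   Items  Hold    Total    Best  Gran.   Avg. Name
 *          12   500      340      45      8     22 example queue
 *
 * A 'P' in the first column marks a plugged queue.
 */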
/* 'plug' a queue: Stop it from being scheduled,
 * ie: prevent the queue from draining.
 */
void
work_queue_plug (struct work_queue *wq)
{
  if (wq->thread)
    thread_cancel (wq->thread);

  wq->thread = NULL;

  UNSET_FLAG (wq->flags, WQ_UNPLUGGED);
}
/* unplug queue, schedule it again, if appropriate
 * Ie: Allow the queue to be drained again
 */
void
work_queue_unplug (struct work_queue *wq)
{
  SET_FLAG (wq->flags, WQ_UNPLUGGED);

  /* if thread isn't already waiting, add one */
  work_queue_schedule (wq, wq->spec.hold);
}
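/* Plug/unplug allows batching: hold the queue while a burst of items is
 * added, then let it drain in one go.  A sketch (loop variables are
 * hypothetical):
 *
 *   work_queue_plug (wq);
 *   for (i = 0; i < n; i++)
 *     work_queue_add (wq, items[i]);
 *   work_queue_unplug (wq);
 */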
/* timer thread to process a work queue
 * will reschedule itself if required,
 * otherwise work_queue_add will get it rescheduled when new items arrive
 */
int
work_queue_run (struct thread *thread)
{
  struct work_queue *wq;
  struct work_queue_item *item;
  wq_item_status ret;
  unsigned int cycles = 0;
  struct listnode *node, *nnode;
  char yielded = 0;

  wq = THREAD_ARG (thread);
  wq->thread = NULL;

  assert (wq && wq->items);
  /* calculate cycle granularity:
   * list iteration == 1 cycle
   * granularity == # cycles between checks whether we should yield.
   *
   * granularity should be > 0, and can increase slowly after each run to
   * provide some hysteresis, but not past cycles.best or 2*cycles.
   *
   * Best: starts low, can only increase
   *
   * Granularity: starts at WORK_QUEUE_MIN_GRANULARITY, can be decreased
   *              if we run to the end of the time slot, and can increase
   *              otherwise.
   *
   * We could use just the average and save some work; however, we want to
   * be able to adjust quickly to CPU pressure. The average won't shift
   * much once the daemon has been running a long time.
   */
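  /* Worked example with illustrative numbers: starting from granularity
   * 10, a run that completes 45 cycles without yielding has
   * 45 > 10 * 2 * 2, so granularity doubles to 20 (quick ramp-up).  A
   * second unyielded 45-cycle run has 45 > 20 * 2 but not 45 > 20 * 2 * 2,
   * so granularity only creeps up to 22.  If CPU pressure later forces a
   * yield after just 6 cycles, granularity drops straight back to 6.
   */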
  if (wq->cycles.granularity == 0)
    wq->cycles.granularity = WORK_QUEUE_MIN_GRANULARITY;
  for (ALL_LIST_ELEMENTS (wq->items, node, nnode, item))
    {
      assert (item && item->data);

      /* don't run items which are past their allowed retries */
      if (item->ran > wq->spec.max_retries)
        {
          /* run error handler, if any */
          if (wq->spec.errorfunc)
            wq->spec.errorfunc (wq, item->data);
          work_queue_item_remove (wq, node);
          continue;
        }
      /* run and take care of items that want to be retried immediately */
      do
        {
          ret = wq->spec.workfunc (wq, item->data);
          item->ran++;
        }
      while ((ret == WQ_RETRY_NOW)
             && (item->ran < wq->spec.max_retries));
      switch (ret)
        {
        case WQ_QUEUE_BLOCKED:
          {
            /* decrement item->ran again, because this isn't an item
             * specific error, and fall through to WQ_RETRY_LATER
             */
            item->ran--;
          }
        case WQ_RETRY_LATER:
          {
            goto stats;
          }
        case WQ_REQUEUE:
          {
            work_queue_item_requeue (wq, node);
            break;
          }
        case WQ_RETRY_NOW:
          /* a RETRY_NOW that gets here has exceeded max_retries, same as
           * ERROR */
        case WQ_ERROR:
          {
            if (wq->spec.errorfunc)
              wq->spec.errorfunc (wq, item);
          }
          /* fall through here is deliberate */
        case WQ_SUCCESS:
        default:
          {
            work_queue_item_remove (wq, node);
            break;
          }
        }
      /* completed cycle */
      cycles++;

      /* test if we should yield */
      if ( !(cycles % wq->cycles.granularity)
           && thread_should_yield (thread))
        {
          yielded = 1;
          goto stats;
        }
    }

stats:

#define WQ_HYSTERIS_FACTOR 2
  /* we yielded, check whether granularity should be reduced */
  if (yielded && (cycles < wq->cycles.granularity))
    {
      wq->cycles.granularity = ((cycles > 0) ? cycles
                                             : WORK_QUEUE_MIN_GRANULARITY);
    }
  if (cycles >= (wq->cycles.granularity))
    {
      if (cycles > wq->cycles.best)
        wq->cycles.best = cycles;

      /* along with the yielded check, provides hysteresis for granularity */
      if (cycles > (wq->cycles.granularity * WQ_HYSTERIS_FACTOR * 2))
        wq->cycles.granularity *= WQ_HYSTERIS_FACTOR; /* quick ramp-up */
      else if (cycles > (wq->cycles.granularity * WQ_HYSTERIS_FACTOR))
        wq->cycles.granularity += WQ_HYSTERIS_FACTOR;
    }
#undef WQ_HYSTERIS_FACTOR
  wq->runs++;
  wq->cycles.total += cycles;

#if 0
  printf ("%s: cycles %d, new: best %d, worst %d\n",
          __func__, cycles, wq->cycles.best, wq->cycles.granularity);
#endif
  /* Is the queue done yet? If it is, call the completion callback. */
  if (listcount (wq->items) > 0)
    work_queue_schedule (wq, 0);
  else if (wq->spec.completion_func)
    wq->spec.completion_func (wq);

  return 0;
}