/**
 ******************************************************************************
 * @file pios_callbackscheduler.c
 * @author The OpenPilot Team, http://www.openpilot.org Copyright (C) 2013.
 * @brief Scheduler to run callback functions from a shared context with given priorities.
 * @see The GNU Public License (GPL) Version 3
 *****************************************************************************/
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
28 #ifdef PIOS_INCLUDE_CALLBACKSCHEDULER
31 #include <uavobjectmanager.h>
35 #define STACK_SAFETYCOUNT 16
36 #define STACK_SIZE (300 + STACK_SAFETYSIZE)
37 #define STACK_SAFETYSIZE 8
38 #define MAX_SLEEP 1000
/* One delayed-callback scheduler task: owns the per-priority callback queues,
 * the handle of the FreeRTOS task that drains them, and the semaphore used to
 * wake that task. Scheduler tasks are kept in a singly linked list.
 * NOTE(review): several members/lines of this struct are elided in this
 * fragment — confirm against the full file. */
44 struct DelayedCallbackTaskStruct
{
/* Queue heads, one list per callback priority (index 0 .. CALLBACK_PRIORITY_LOW). */
45 DelayedCallbackInfo
*callbackQueue
[CALLBACK_PRIORITY_LOW
+ 1];
/* Round-robin cursor into each queue, advanced by runNextCallback(). */
46 DelayedCallbackInfo
*queueCursor
[CALLBACK_PRIORITY_LOW
+ 1];
/* FreeRTOS handle of the task executing this scheduler (set at task creation). */
47 xTaskHandle callbackSchedulerTaskHandle
;
/* FreeRTOS task priority this scheduler runs at; one scheduler exists per value. */
50 DelayedCallbackPriorityTask priorityTask
;
/* Binary semaphore signalled by Dispatch/Schedule to wake the scheduler. */
51 xSemaphoreHandle signal
;
/* Next scheduler task in the global schedulerTasks list. */
52 struct DelayedCallbackTaskStruct
*next
;
56 * callback information
/* Per-callback bookkeeping: dispatch/schedule state, stack-watermark counters,
 * owning scheduler task and queue linkage.
 * NOTE(review): additional members (e.g. cb, callbackID, stackSize, stackFree,
 * stackNotFree, runCount referenced elsewhere in this file) are elided in this
 * fragment — confirm against the full file. */
58 struct DelayedCallbackInfoStruct
{
/* true once dispatched and not yet executed; read/written from task and ISR context. */
61 bool volatile waiting
;
/* Absolute tick at which the callback is due; 0 means "not scheduled". */
62 uint32_t volatile scheduletime
;
/* Number of untainted checks required before narrowing the stack-usage search. */
66 uint16_t stackSafetyCount
;
/* Consecutive untainted checks observed so far (reset when a mark is tainted). */
67 uint16_t currentSafetyCount
;
/* Scheduler task that owns and runs this callback. */
69 struct DelayedCallbackTaskStruct
*task
;
/* Next callback in the same priority queue. */
70 struct DelayedCallbackInfoStruct
*next
;
/* Head of the linked list of all scheduler tasks (one per distinct priorityTask). */
75 static struct DelayedCallbackTaskStruct
*schedulerTasks
;
/* Recursive mutex guarding the task list and all callback scheduling state. */
76 static xSemaphoreHandle mutex
;
/* Set by PIOS_CALLBACKSCHEDULER_Start(); later-created schedulers spawn immediately. */
77 static bool schedulerStarted
;
/* Forward declarations for the scheduler main loop and its queue walker. */
80 static void CallbackSchedulerTask(void *task
);
81 static int32_t runNextCallback(struct DelayedCallbackTaskStruct
*task
, DelayedCallbackPriority priority
);
84 * Initialize the scheduler
85 * must be called before any other functions are called
86 * \return Success (0), failure (-1)
88 int32_t PIOS_CALLBACKSCHEDULER_Initialize()
90 // Initialize variables
91 schedulerTasks
= NULL
;
92 schedulerStarted
= false;
/* Create the global recursive mutex protecting all scheduler state. */
95 mutex
= xSemaphoreCreateRecursiveMutex();
/* NOTE(review): the NULL-check on the created mutex and the function's return
 * statement are not visible in this fragment — confirm against the full file. */
105 * Start all scheduler tasks
106 * Will instantiate all scheduler tasks registered so far. Although new
107 * callbacks CAN be registered beyond that point, any further scheduling tasks
108 * will be started the moment of instantiation. It is not possible to increase
109 * the STACK requirements of a scheduler task after this function has been
110 * run. No callbacks will be run before this function is called, although
111 * they can be marked for later execution by executing the dispatch function.
112 * \return Success (0), failure (-1)
114 int32_t PIOS_CALLBACKSCHEDULER_Start()
/* Serialize against concurrent Create()/Start() calls. */
116 xSemaphoreTakeRecursive(mutex
, portMAX_DELAY
);
/* Starting twice is a programming error. */
119 PIOS_Assert(schedulerStarted
== false);
122 struct DelayedCallbackTaskStruct
*cursor
= NULL
;
/* Spawn one FreeRTOS task per registered scheduler.
 * NOTE(review): the xTaskCreate(...) call head and the task-counter lines are
 * elided in this fragment — confirm against the full file. */
124 LL_FOREACH(schedulerTasks
, cursor
) {
126 CallbackSchedulerTask
,
/* stackSize is in bytes; FreeRTOS expects words, hence the /4 (+1 rounding). */
128 1 + (cursor
->stackSize
/ 4),
130 cursor
->priorityTask
,
131 &cursor
->callbackSchedulerTaskHandle
/* Register with the task monitor only while monitor slots remain. */
133 if (TASKINFO_RUNNING_CALLBACKSCHEDULER0
+ t
<= TASKINFO_RUNNING_CALLBACKSCHEDULER3
) {
134 PIOS_TASK_MONITOR_RegisterTask(TASKINFO_RUNNING_CALLBACKSCHEDULER0
+ t
, cursor
->callbackSchedulerTaskHandle
);
/* From here on, newly created schedulers spawn their task immediately. */
139 schedulerStarted
= true;
141 xSemaphoreGiveRecursive(mutex
);
147 * Schedule dispatching a callback at some point in the future. The function returns immediately.
148 * \param[in] *cbinfo the callback handle
149 * \param[in] milliseconds How far in the future to dispatch the callback
150 * \param[in] updatemode What to do if the callback is already scheduled but not dispatched yet.
152 * UPDATEMODE_NONE: An existing schedule will not be touched, the call will have no effect at all if there's an existing schedule.
153 * UPDATEMODE_SOONER: The callback will be rescheduled only if the new schedule triggers before the original one would have triggered.
154 * UPDATEMODE_LATER: The callback will be rescheduled only if the new schedule triggers after the original one would have triggered.
155 * UPDATEMODE_OVERRIDE: The callback will be rescheduled in any case, effectively overriding any previous schedule. (sooner+later=override)
156 * \return 0: not scheduled, previous schedule takes precedence, 1: new schedule, 2: previous schedule overridden
158 int32_t PIOS_CALLBACKSCHEDULER_Schedule(
159 DelayedCallbackInfo
*cbinfo
,
160 int32_t milliseconds
,
161 DelayedCallbackUpdateMode updatemode
)
/* Clamp negative delays to "now". */
167 if (milliseconds
<= 0) {
168 milliseconds
= 0; // we can and will not schedule in the past since that ruins the wraparound of uint32_t
/* scheduletime is shared with the scheduler task — take the global mutex. */
171 xSemaphoreTakeRecursive(mutex
, portMAX_DELAY
);
/* Absolute due tick; note 'new' is used as an identifier (C, not C++). */
173 uint32_t new = xTaskGetTickCount() + (milliseconds
/ portTICK_RATE_MS
);
175 new = 1; // zero has a special meaning, schedule at time 1 instead
/* Signed difference works across uint32_t tick wraparound. */
178 int32_t diff
= new - cbinfo
->scheduletime
;
/* Reschedule when unscheduled, or when the update mode permits it. */
179 if ((!cbinfo
->scheduletime
)
180 || ((updatemode
& CALLBACK_UPDATEMODE_SOONER
) && diff
< 0)
181 || ((updatemode
& CALLBACK_UPDATEMODE_LATER
) && diff
> 0)
183 // the scheduletime may be updated
184 if (!cbinfo
->scheduletime
) {
189 cbinfo
->scheduletime
= new;
191 // scheduler needs to be notified to adapt sleep times
192 xSemaphoreGive(cbinfo
->task
->signal
);
195 xSemaphoreGiveRecursive(mutex
);
/* NOTE(review): the result-value bookkeeping and return statement are elided
 * in this fragment — confirm against the full file. */
201 * Dispatch an event by invoking the supplied callback. The function
202 * returns immediately, the callback is invoked from the event task.
203 * \param[in] cbinfo the callback handle
204 * \return Success (-1), failure (0)
206 int32_t PIOS_CALLBACKSCHEDULER_Dispatch(DelayedCallbackInfo
*cbinfo
)
210 // no semaphore needed for the callback
/* 'waiting' is volatile; a single flag write is the whole hand-off. */
211 cbinfo
->waiting
= true;
212 // but the scheduler as a whole needs to be notified
/* Wake the owning scheduler task; its return value is passed straight through. */
213 return xSemaphoreGive(cbinfo
->task
->signal
);
217 * Dispatch an event by invoking the supplied callback. The function
218 * returns immediately, the callback is invoked from the event task.
219 * \param[in] cbinfo the callback handle
220 * \param[in] pxHigherPriorityTaskWoken
221 * xSemaphoreGiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE if
222 * giving the semaphore caused a task to unblock, and the unblocked task has a
223 * priority higher than the currently running task. If xSemaphoreGiveFromISR()
224 * sets this value to pdTRUE then a context switch should be requested before
225 * the interrupt is exited.
226 * From FreeRTOS Docu: Context switching from an ISR uses port specific syntax.
227 * Check the demo task for your port to find the syntax required.
228 * \return Success (-1), failure (0)
230 int32_t PIOS_CALLBACKSCHEDULER_DispatchFromISR(DelayedCallbackInfo
*cbinfo
, long *pxHigherPriorityTaskWoken
)
234 // no semaphore needed for the callback
/* ISR-safe variant of Dispatch(): same flag write, ISR-safe semaphore give. */
235 cbinfo
->waiting
= true;
236 // but the scheduler as a whole needs to be notified
237 return xSemaphoreGiveFromISR(cbinfo
->task
->signal
, pxHigherPriorityTaskWoken
);
241 * Register a new callback to be called by a delayed callback scheduler task.
242 * If a scheduler task with the specified task priority does not exist yet, it
244 * \param[in] cb The callback to be invoked
245 * \param[in] priority Priority of the callback compared to other callbacks scheduled by the same delayed callback scheduler task.
246 * \param[in] priorityTask Task priority of the scheduler task. One scheduler task will be spawned for each distinct value specified,
247 * further callbacks created with the same priorityTask will all be handled by the same delayed callback scheduler task
248 * and scheduled according to their individual callback priorities
249 * \param[in] stacksize The stack requirements of the callback when called by the scheduler.
250 * \return CallbackInfo Pointer on success, NULL if failed.
/* NOTE(review): the parameter list (cb, callbackID, stacksize) and several
 * statements of this function are elided in this fragment — confirm against
 * the full file. */
252 DelayedCallbackInfo
*PIOS_CALLBACKSCHEDULER_Create(
254 DelayedCallbackPriority priority
,
255 DelayedCallbackPriorityTask priorityTask
,
/* Everything below touches shared scheduler state — hold the global mutex. */
259 xSemaphoreTakeRecursive(mutex
, portMAX_DELAY
);
261 // add callback schedulers own stack requirements
262 stacksize
+= STACK_SIZE
;
264 // find appropriate scheduler task matching priorityTask
265 struct DelayedCallbackTaskStruct
*task
= NULL
;
267 LL_FOREACH(schedulerTasks
, task
) {
268 if (task
->priorityTask
== priorityTask
) {
273 // if given priorityTask does not exist, create it
275 // allocate memory if possible
276 task
= (struct DelayedCallbackTaskStruct
*)pios_malloc(sizeof(struct DelayedCallbackTaskStruct
));
/* Allocation-failure path: release the mutex (return elided in this view). */
278 xSemaphoreGiveRecursive(mutex
);
282 // initialize structure
283 for (DelayedCallbackPriority p
= 0; p
<= CALLBACK_PRIORITY_LOW
; p
++) {
284 task
->callbackQueue
[p
] = NULL
;
285 task
->queueCursor
[p
] = NULL
;
/* Give each scheduler task a distinguishable name suffix ('a', 'b', ...). */
288 task
->name
[1] = 'a' + t
;
290 task
->stackSize
= stacksize
;
291 task
->priorityTask
= priorityTask
;
294 // create the signaling semaphore
295 vSemaphoreCreateBinary(task
->signal
);
/* Semaphore-creation-failure path (return elided in this view). */
297 xSemaphoreGiveRecursive(mutex
);
301 // add to list of scheduler tasks
302 LL_APPEND(schedulerTasks
, task
);
304 // Previously registered tasks are spawned when PIOS_CALLBACKSCHEDULER_Start() is called.
305 // Tasks registered afterwards need to spawn upon creation.
306 if (schedulerStarted
) {
/* xTaskCreate(...) call head elided; same bytes-to-words conversion as Start(). */
308 CallbackSchedulerTask
,
310 1 + (task
->stackSize
/ 4),
313 &task
->callbackSchedulerTaskHandle
315 if (TASKINFO_RUNNING_CALLBACKSCHEDULER0
+ t
<= TASKINFO_RUNNING_CALLBACKSCHEDULER3
) {
316 PIOS_TASK_MONITOR_RegisterTask(TASKINFO_RUNNING_CALLBACKSCHEDULER0
+ t
, task
->callbackSchedulerTaskHandle
);
/* Before Start(): grow the shared scheduler stack to the largest request seen. */
321 if (!schedulerStarted
&& stacksize
> task
->stackSize
) {
322 task
->stackSize
= stacksize
; // previous to task initialisation we can still adapt to the maximum needed stack
/* After Start() the stack is fixed — refuse callbacks needing more than it has. */
326 if (stacksize
> task
->stackSize
) {
327 xSemaphoreGiveRecursive(mutex
);
328 return NULL
; // error - not enough memory
331 // initialize callback scheduling info
332 DelayedCallbackInfo
*info
= (DelayedCallbackInfo
*)pios_malloc(sizeof(DelayedCallbackInfo
));
/* Allocation-failure path for the callback info struct. */
334 xSemaphoreGiveRecursive(mutex
);
335 return NULL
; // error - not enough memory
338 info
->waiting
= false;
339 info
->scheduletime
= 0;
342 info
->callbackID
= callbackID
;
/* Store the caller's stack requirement net of the scheduler's own overhead. */
344 info
->stackSize
= stacksize
- STACK_SIZE
;
/* Watermark search starts pessimistic: assume the whole stack may be in use. */
345 info
->stackNotFree
= info
->stackSize
;
347 info
->stackSafetyCount
= STACK_SAFETYCOUNT
;
348 info
->currentSafetyCount
= 0;
350 // add to scheduling queue
351 LL_APPEND(task
->callbackQueue
[priority
], info
);
353 xSemaphoreGiveRecursive(mutex
);
359 * Iterator. Iterates over all callbacks and all scheduler tasks and retrieves information
361 * @param[in] callback Callback function to receive the data - will be called in same task context as the callerThe id of the task the task_info refers to.
362 * @param context Context information optionally provided to the callback.
364 void PIOS_CALLBACKSCHEDULER_ForEachCallback(CallbackSchedulerCallbackInfoCallback callback
, void *context
)
/* Snapshot buffer refilled for every callback before invoking the visitor. */
370 struct pios_callback_info info
;
372 struct DelayedCallbackTaskStruct
*task
= NULL
;
/* Walk every scheduler task, then every priority queue within it. */
373 LL_FOREACH(schedulerTasks
, task
) {
376 for (prio
= 0; prio
< (CALLBACK_PRIORITY_LOW
+ 1); prio
++) {
377 struct DelayedCallbackInfoStruct
*cbinfo
;
378 LL_FOREACH(task
->callbackQueue
[prio
], cbinfo
) {
/* Hold the mutex only while copying the shared counters into 'info'. */
379 xSemaphoreTakeRecursive(mutex
, portMAX_DELAY
);
380 info
.is_running
= true;
381 info
.stack_remaining
= cbinfo
->stackNotFree
;
382 info
.running_time_count
= cbinfo
->runCount
;
383 xSemaphoreGiveRecursive(mutex
);
/* Visitor runs outside the lock, in the caller's task context. */
384 callback(cbinfo
->callbackID
, &info
, context
);
391 * Stack magic, find how much stack is being used without affecting performance
/* Paint '#' watermark bytes at two probe points below the current stack
 * pointer (the far end of the callback's stack budget, and the halfway point
 * of the current free/not-free search window). checkStack() later inspects
 * these marks to binary-search the callback's real stack usage.
 * NOTE(review): the loop bodies writing the marks and the early-return for the
 * disabled case are elided in this fragment — confirm against the full file. */
393 static void markStack(DelayedCallbackInfo
*current
)
396 register int32_t halfWayMark
;
397 volatile unsigned char *marker
;
/* stackNotFree < 0 flags a previously detected overflow: checking is disabled. */
399 if (current
->stackNotFree
< 0) {
403 // end of stack watermark
/* Probe point: current stack pointer (address of 'marker') minus the budget. */
404 marker
= (unsigned char *)(((size_t)&marker
) - (size_t)current
->stackSize
);
405 for (t
= -STACK_SAFETYSIZE
; t
< 0; t
++) {
408 // shifted watermarks
/* Midpoint of the converging [stackFree, stackNotFree] search interval. */
409 halfWayMark
= current
->stackFree
+ (current
->stackNotFree
- current
->stackFree
) / 2;
410 marker
= (unsigned char *)((size_t)marker
+ halfWayMark
);
411 for (t
= -STACK_SAFETYSIZE
; t
< 0; t
++) {
/* Inspect the watermarks painted by markStack() after the callback ran:
 * a tainted end-of-stack mark means overflow (disable all further checks);
 * a tainted halfway mark narrows stackNotFree downward; repeated untainted
 * checks eventually move stackFree up — a binary search for real usage.
 * NOTE(review): some statements are elided in this fragment — confirm against
 * the full file. */
415 static void checkStack(DelayedCallbackInfo
*current
)
418 register int32_t halfWayMark
;
419 volatile unsigned char *marker
;
/* Checks were disabled after a detected overflow. */
421 if (current
->stackNotFree
< 0) {
425 // end of stack watermark
426 marker
= (unsigned char *)(((size_t)&marker
) - (size_t)current
->stackSize
);
427 for (t
= -STACK_SAFETYSIZE
; t
< 0; t
++) {
428 if (*(marker
+ t
) != '#') {
429 current
->stackNotFree
= -1; // stack overflow, disable all further checks
433 // shifted watermarks
434 halfWayMark
= current
->stackFree
+ (current
->stackNotFree
- current
->stackFree
) / 2;
435 marker
= (unsigned char *)((size_t)marker
+ halfWayMark
);
436 for (t
= -STACK_SAFETYSIZE
; t
< 0; t
++) {
437 if (*(marker
+ t
) != '#') {
438 current
->stackNotFree
= halfWayMark
; // tainted mark, this place is definitely used stack
439 current
->currentSafetyCount
= 0;
440 if (current
->stackNotFree
<= current
->stackFree
) {
441 current
->stackFree
= 0; // if it was supposed to be free, restart search between here and bottom of stack
/* Saturating increment: never wrap the 16-bit safety counter. */
447 if (current
->currentSafetyCount
< 0xffff) {
448 current
->currentSafetyCount
++; // mark has not been tainted, increase safety counter
450 if (current
->currentSafetyCount
>= current
->stackSafetyCount
) { // if the safety counter is above the limit, then
451 if (halfWayMark
== current
->stackFree
) { // check if search already converged, if so increase the limit to find very rare stack usage incidents
452 current
->stackSafetyCount
= current
->currentSafetyCount
;
454 current
->stackFree
= halfWayMark
; // otherwise just mark this position as free stack to narrow search
455 current
->currentSafetyCount
= 0;
462 * \param[in] task The scheduler task in question
463 * \param[in] priority The scheduling priority of the callback to search for
464 * \return wait time until next scheduled callback is due - 0 if a callback has just been executed
/* Recursive priority walker: tries to run one due/dispatched callback at this
 * priority, recursing to lower priorities when this queue is empty or fully
 * traversed, and returns the scheduler's next sleep time.
 * NOTE(review): the do/while loop head, some conditionals and the final return
 * are elided in this fragment — confirm against the full file. */
466 static int32_t runNextCallback(struct DelayedCallbackTaskStruct
*task
, DelayedCallbackPriority priority
)
/* Default sleep: cap at MAX_SLEEP ticks so schedules set later still get seen. */
468 int32_t result
= MAX_SLEEP
;
/* Recursion base case: ran past the lowest priority. */
472 if (priority
> CALLBACK_PRIORITY_LOW
) {
476 // queue is empty, search a lower priority queue
477 if (task
->callbackQueue
[priority
] == NULL
) {
478 return runNextCallback(task
, priority
+ 1);
/* Resume the round-robin where the cursor left off. */
481 DelayedCallbackInfo
*current
= task
->queueCursor
[priority
];
482 DelayedCallbackInfo
*next
;
484 if (current
== NULL
) {
485 next
= task
->callbackQueue
[priority
]; // loop around the end of the list
486 // also attempt to run a callback that has lower priority
487 // every time the queue is completely traversed
488 diff
= runNextCallback(task
, priority
+ 1);
490 task
->queueCursor
[priority
] = next
; // the recursive call has executed a callback
494 result
= diff
; // adjust sleep time
497 next
= current
->next
;
498 xSemaphoreTakeRecursive(mutex
, portMAX_DELAY
); // access to scheduletime should be mutex protected
499 if (current
->scheduletime
) {
/* Signed difference handles tick-counter wraparound; <=0 means "due now". */
500 diff
= current
->scheduletime
- xTaskGetTickCount();
502 current
->waiting
= true;
503 } else if (diff
< result
) {
504 result
= diff
; // adjust sleep time
507 if (current
->waiting
) {
/* Advance the cursor past this callback before running it. */
508 task
->queueCursor
[priority
] = next
;
509 current
->scheduletime
= 0; // any schedules are reset
510 current
->waiting
= false; // the flag is reset just before execution.
/* Drop the mutex before the callback: callbacks may (re)schedule/dispatch. */
511 xSemaphoreGiveRecursive(mutex
);
513 /* callback gets invoked here - check stack sizes */
516 current
->cb(); // call the callback
524 xSemaphoreGiveRecursive(mutex
);
/* Stop once we've walked the whole circular queue without running anything. */
527 } while (current
!= task
->queueCursor
[priority
]);
528 // once the list has been traversed entirely without finding any to be executed task, abort (nothing to do)
533 * Scheduler task, responsible of invoking callbacks.
534 * \param[in] task The scheduling task being run
/* FreeRTOS task entry point: repeatedly run due callbacks starting at the
 * highest priority, then block on the task's signal semaphore for the sleep
 * time runNextCallback() computed (woken early by Dispatch/Schedule).
 * NOTE(review): the enclosing forever-loop and 'delay' declaration are elided
 * in this fragment — confirm against the full file. */
536 static void CallbackSchedulerTask(void *task
)
541 delay
= runNextCallback((struct DelayedCallbackTaskStruct
*)task
, CALLBACK_PRIORITY_CRITICAL
);
543 // nothing to do but sleep
/* Blocking take doubles as the wakeup channel for new dispatches/schedules. */
544 xSemaphoreTake(((struct DelayedCallbackTaskStruct
*)task
)->signal
, delay
);
549 #endif // ifdef PIOS_INCLUDE_CALLBACKSCHEDULER