/**
 ******************************************************************************
 *
 * @file       pios_callbackscheduler.c
 * @author     The OpenPilot Team, http://www.openpilot.org Copyright (C) 2013.
 * @brief      Scheduler to run callback functions from a shared context with given priorities.
 *
 * @see        The GNU Public License (GPL) Version 3
 *
 *****************************************************************************/
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
27 #include <pios.h>
28 #ifdef PIOS_INCLUDE_CALLBACKSCHEDULER
30 #include <utlist.h>
31 #include <uavobjectmanager.h>
32 #include <taskinfo.h>
// Private constants
// Consecutive clean checks required before checkStack() narrows or freezes the watermark search.
#define STACK_SAFETYCOUNT 16
// Overhead the scheduler itself adds to every callback's requested stack (bytes).
#define STACK_SIZE        (300 + STACK_SAFETYSIZE)
// Width of the '#' watermark pattern written below the stack pointer (bytes).
#define STACK_SAFETYSIZE  8
// Upper bound for the scheduler task's idle sleep (ticks).
#define MAX_SLEEP         1000
40 // Private types
41 /**
42 * task information
44 struct DelayedCallbackTaskStruct {
45 DelayedCallbackInfo *callbackQueue[CALLBACK_PRIORITY_LOW + 1];
46 DelayedCallbackInfo *queueCursor[CALLBACK_PRIORITY_LOW + 1];
47 xTaskHandle callbackSchedulerTaskHandle;
48 char name[3];
49 uint32_t stackSize;
50 DelayedCallbackPriorityTask priorityTask;
51 xSemaphoreHandle signal;
52 struct DelayedCallbackTaskStruct *next;
55 /**
56 * callback information
58 struct DelayedCallbackInfoStruct {
59 DelayedCallback cb;
60 int16_t callbackID;
61 bool volatile waiting;
62 uint32_t volatile scheduletime;
63 uint32_t stackSize;
64 int32_t stackFree;
65 int32_t stackNotFree;
66 uint16_t stackSafetyCount;
67 uint16_t currentSafetyCount;
68 uint32_t runCount;
69 struct DelayedCallbackTaskStruct *task;
70 struct DelayedCallbackInfoStruct *next;
74 // Private variables
75 static struct DelayedCallbackTaskStruct *schedulerTasks;
76 static xSemaphoreHandle mutex;
77 static bool schedulerStarted;
79 // Private functions
80 static void CallbackSchedulerTask(void *task);
81 static int32_t runNextCallback(struct DelayedCallbackTaskStruct *task, DelayedCallbackPriority priority);
83 /**
84 * Initialize the scheduler
85 * must be called before any other functions are called
86 * \return Success (0), failure (-1)
88 int32_t PIOS_CALLBACKSCHEDULER_Initialize()
90 // Initialize variables
91 schedulerTasks = NULL;
92 schedulerStarted = false;
94 // Create mutex
95 mutex = xSemaphoreCreateRecursiveMutex();
96 if (mutex == NULL) {
97 return -1;
100 // Done
101 return 0;
105 * Start all scheduler tasks
106 * Will instantiate all scheduler tasks registered so far. Although new
107 * callbacks CAN be registered beyond that point, any further scheduling tasks
108 * will be started the moment of instantiation. It is not possible to increase
109 * the STACK requirements of a scheduler task after this function has been
110 * run. No callbacks will be run before this function is called, although
111 * they can be marked for later execution by executing the dispatch function.
112 * \return Success (0), failure (-1)
114 int32_t PIOS_CALLBACKSCHEDULER_Start()
116 xSemaphoreTakeRecursive(mutex, portMAX_DELAY);
118 // only call once
119 PIOS_Assert(schedulerStarted == false);
121 // start tasks
122 struct DelayedCallbackTaskStruct *cursor = NULL;
123 int t = 0;
124 LL_FOREACH(schedulerTasks, cursor) {
125 xTaskCreate(
126 CallbackSchedulerTask,
127 cursor->name,
128 1 + (cursor->stackSize / 4),
129 cursor,
130 cursor->priorityTask,
131 &cursor->callbackSchedulerTaskHandle
133 if (TASKINFO_RUNNING_CALLBACKSCHEDULER0 + t <= TASKINFO_RUNNING_CALLBACKSCHEDULER3) {
134 PIOS_TASK_MONITOR_RegisterTask(TASKINFO_RUNNING_CALLBACKSCHEDULER0 + t, cursor->callbackSchedulerTaskHandle);
136 t++;
139 schedulerStarted = true;
141 xSemaphoreGiveRecursive(mutex);
143 return 0;
147 * Schedule dispatching a callback at some point in the future. The function returns immediately.
148 * \param[in] *cbinfo the callback handle
149 * \param[in] milliseconds How far in the future to dispatch the callback
150 * \param[in] updatemode What to do if the callback is already scheduled but not dispatched yet.
151 * The options are:
152 * UPDATEMODE_NONE: An existing schedule will not be touched, the call will have no effect at all if there's an existing schedule.
153 * UPDATEMODE_SOONER: The callback will be rescheduled only if the new schedule triggers before the original one would have triggered.
154 * UPDATEMODE_LATER: The callback will be rescheduled only if the new schedule triggers after the original one would have triggered.
155 * UPDATEMODE_OVERRIDE: The callback will be rescheduled in any case, effectively overriding any previous schedule. (sooner+later=override)
156 * \return 0: not scheduled, previous schedule takes precedence, 1: new schedule, 2: previous schedule overridden
158 int32_t PIOS_CALLBACKSCHEDULER_Schedule(
159 DelayedCallbackInfo *cbinfo,
160 int32_t milliseconds,
161 DelayedCallbackUpdateMode updatemode)
163 int32_t result = 0;
165 PIOS_Assert(cbinfo);
167 if (milliseconds <= 0) {
168 milliseconds = 0; // we can and will not schedule in the past since that ruins the wraparound of uint32_t
171 xSemaphoreTakeRecursive(mutex, portMAX_DELAY);
173 uint32_t new = xTaskGetTickCount() + (milliseconds / portTICK_RATE_MS);
174 if (!new) {
175 new = 1; // zero has a special meaning, schedule at time 1 instead
178 int32_t diff = new - cbinfo->scheduletime;
179 if ((!cbinfo->scheduletime)
180 || ((updatemode & CALLBACK_UPDATEMODE_SOONER) && diff < 0)
181 || ((updatemode & CALLBACK_UPDATEMODE_LATER) && diff > 0)
183 // the scheduletime may be updated
184 if (!cbinfo->scheduletime) {
185 result = 1;
186 } else {
187 result = 2;
189 cbinfo->scheduletime = new;
191 // scheduler needs to be notified to adapt sleep times
192 xSemaphoreGive(cbinfo->task->signal);
195 xSemaphoreGiveRecursive(mutex);
197 return result;
201 * Dispatch an event by invoking the supplied callback. The function
202 * returns immediately, the callback is invoked from the event task.
203 * \param[in] cbinfo the callback handle
204 * \return Success (-1), failure (0)
206 int32_t PIOS_CALLBACKSCHEDULER_Dispatch(DelayedCallbackInfo *cbinfo)
208 PIOS_Assert(cbinfo);
210 // no semaphore needed for the callback
211 cbinfo->waiting = true;
212 // but the scheduler as a whole needs to be notified
213 return xSemaphoreGive(cbinfo->task->signal);
217 * Dispatch an event by invoking the supplied callback. The function
218 * returns immediately, the callback is invoked from the event task.
219 * \param[in] cbinfo the callback handle
220 * \param[in] pxHigherPriorityTaskWoken
221 * xSemaphoreGiveFromISR() will set *pxHigherPriorityTaskWoken to pdTRUE if
222 * giving the semaphore caused a task to unblock, and the unblocked task has a
223 * priority higher than the currently running task. If xSemaphoreGiveFromISR()
224 * sets this value to pdTRUE then a context switch should be requested before
225 * the interrupt is exited.
226 * From FreeRTOS Docu: Context switching from an ISR uses port specific syntax.
227 * Check the demo task for your port to find the syntax required.
228 * \return Success (-1), failure (0)
230 int32_t PIOS_CALLBACKSCHEDULER_DispatchFromISR(DelayedCallbackInfo *cbinfo, long *pxHigherPriorityTaskWoken)
232 PIOS_Assert(cbinfo);
234 // no semaphore needed for the callback
235 cbinfo->waiting = true;
236 // but the scheduler as a whole needs to be notified
237 return xSemaphoreGiveFromISR(cbinfo->task->signal, pxHigherPriorityTaskWoken);
241 * Register a new callback to be called by a delayed callback scheduler task.
242 * If a scheduler task with the specified task priority does not exist yet, it
243 * will be created.
244 * \param[in] cb The callback to be invoked
245 * \param[in] priority Priority of the callback compared to other callbacks scheduled by the same delayed callback scheduler task.
246 * \param[in] priorityTask Task priority of the scheduler task. One scheduler task will be spawned for each distinct value specified,
247 * further callbacks created with the same priorityTask will all be handled by the same delayed callback scheduler task
248 * and scheduled according to their individual callback priorities
249 * \param[in] stacksize The stack requirements of the callback when called by the scheduler.
250 * \return CallbackInfo Pointer on success, NULL if failed.
252 DelayedCallbackInfo *PIOS_CALLBACKSCHEDULER_Create(
253 DelayedCallback cb,
254 DelayedCallbackPriority priority,
255 DelayedCallbackPriorityTask priorityTask,
256 int16_t callbackID,
257 uint32_t stacksize)
259 xSemaphoreTakeRecursive(mutex, portMAX_DELAY);
261 // add callback schedulers own stack requirements
262 stacksize += STACK_SIZE;
264 // find appropriate scheduler task matching priorityTask
265 struct DelayedCallbackTaskStruct *task = NULL;
266 int t = 0;
267 LL_FOREACH(schedulerTasks, task) {
268 if (task->priorityTask == priorityTask) {
269 break; // found
271 t++;
273 // if given priorityTask does not exist, create it
274 if (!task) {
275 // allocate memory if possible
276 task = (struct DelayedCallbackTaskStruct *)pios_malloc(sizeof(struct DelayedCallbackTaskStruct));
277 if (!task) {
278 xSemaphoreGiveRecursive(mutex);
279 return NULL;
282 // initialize structure
283 for (DelayedCallbackPriority p = 0; p <= CALLBACK_PRIORITY_LOW; p++) {
284 task->callbackQueue[p] = NULL;
285 task->queueCursor[p] = NULL;
287 task->name[0] = 'C';
288 task->name[1] = 'a' + t;
289 task->name[2] = 0;
290 task->stackSize = stacksize;
291 task->priorityTask = priorityTask;
292 task->next = NULL;
294 // create the signaling semaphore
295 vSemaphoreCreateBinary(task->signal);
296 if (!task->signal) {
297 xSemaphoreGiveRecursive(mutex);
298 return NULL;
301 // add to list of scheduler tasks
302 LL_APPEND(schedulerTasks, task);
304 // Previously registered tasks are spawned when PIOS_CALLBACKSCHEDULER_Start() is called.
305 // Tasks registered afterwards need to spawn upon creation.
306 if (schedulerStarted) {
307 xTaskCreate(
308 CallbackSchedulerTask,
309 task->name,
310 1 + (task->stackSize / 4),
311 task,
312 task->priorityTask,
313 &task->callbackSchedulerTaskHandle
315 if (TASKINFO_RUNNING_CALLBACKSCHEDULER0 + t <= TASKINFO_RUNNING_CALLBACKSCHEDULER3) {
316 PIOS_TASK_MONITOR_RegisterTask(TASKINFO_RUNNING_CALLBACKSCHEDULER0 + t, task->callbackSchedulerTaskHandle);
321 if (!schedulerStarted && stacksize > task->stackSize) {
322 task->stackSize = stacksize; // previous to task initialisation we can still adapt to the maximum needed stack
326 if (stacksize > task->stackSize) {
327 xSemaphoreGiveRecursive(mutex);
328 return NULL; // error - not enough memory
331 // initialize callback scheduling info
332 DelayedCallbackInfo *info = (DelayedCallbackInfo *)pios_malloc(sizeof(DelayedCallbackInfo));
333 if (!info) {
334 xSemaphoreGiveRecursive(mutex);
335 return NULL; // error - not enough memory
337 info->next = NULL;
338 info->waiting = false;
339 info->scheduletime = 0;
340 info->task = task;
341 info->cb = cb;
342 info->callbackID = callbackID;
343 info->runCount = 0;
344 info->stackSize = stacksize - STACK_SIZE;
345 info->stackNotFree = info->stackSize;
346 info->stackFree = 0;
347 info->stackSafetyCount = STACK_SAFETYCOUNT;
348 info->currentSafetyCount = 0;
350 // add to scheduling queue
351 LL_APPEND(task->callbackQueue[priority], info);
353 xSemaphoreGiveRecursive(mutex);
355 return info;
359 * Iterator. Iterates over all callbacks and all scheduler tasks and retrieves information
361 * @param[in] callback Callback function to receive the data - will be called in same task context as the callerThe id of the task the task_info refers to.
362 * @param context Context information optionally provided to the callback.
364 void PIOS_CALLBACKSCHEDULER_ForEachCallback(CallbackSchedulerCallbackInfoCallback callback, void *context)
366 if (!callback) {
367 return;
370 struct pios_callback_info info;
372 struct DelayedCallbackTaskStruct *task = NULL;
373 LL_FOREACH(schedulerTasks, task) {
374 int prio;
376 for (prio = 0; prio < (CALLBACK_PRIORITY_LOW + 1); prio++) {
377 struct DelayedCallbackInfoStruct *cbinfo;
378 LL_FOREACH(task->callbackQueue[prio], cbinfo) {
379 xSemaphoreTakeRecursive(mutex, portMAX_DELAY);
380 info.is_running = true;
381 info.stack_remaining = cbinfo->stackNotFree;
382 info.running_time_count = cbinfo->runCount;
383 xSemaphoreGiveRecursive(mutex);
384 callback(cbinfo->callbackID, &info, context);
391 * Stack magic, find how much stack is being used without affecting performance
393 static void markStack(DelayedCallbackInfo *current)
395 register int8_t t;
396 register int32_t halfWayMark;
397 volatile unsigned char *marker;
399 if (current->stackNotFree < 0) {
400 return;
403 // end of stack watermark
404 marker = (unsigned char *)(((size_t)&marker) - (size_t)current->stackSize);
405 for (t = -STACK_SAFETYSIZE; t < 0; t++) {
406 *(marker + t) = '#';
408 // shifted watermarks
409 halfWayMark = current->stackFree + (current->stackNotFree - current->stackFree) / 2;
410 marker = (unsigned char *)((size_t)marker + halfWayMark);
411 for (t = -STACK_SAFETYSIZE; t < 0; t++) {
412 *(marker + t) = '#';
415 static void checkStack(DelayedCallbackInfo *current)
417 register int8_t t;
418 register int32_t halfWayMark;
419 volatile unsigned char *marker;
421 if (current->stackNotFree < 0) {
422 return;
425 // end of stack watermark
426 marker = (unsigned char *)(((size_t)&marker) - (size_t)current->stackSize);
427 for (t = -STACK_SAFETYSIZE; t < 0; t++) {
428 if (*(marker + t) != '#') {
429 current->stackNotFree = -1; // stack overflow, disable all further checks
430 return;
433 // shifted watermarks
434 halfWayMark = current->stackFree + (current->stackNotFree - current->stackFree) / 2;
435 marker = (unsigned char *)((size_t)marker + halfWayMark);
436 for (t = -STACK_SAFETYSIZE; t < 0; t++) {
437 if (*(marker + t) != '#') {
438 current->stackNotFree = halfWayMark; // tainted mark, this place is definitely used stack
439 current->currentSafetyCount = 0;
440 if (current->stackNotFree <= current->stackFree) {
441 current->stackFree = 0; // if it was supposed to be free, restart search between here and bottom of stack
443 return;
447 if (current->currentSafetyCount < 0xffff) {
448 current->currentSafetyCount++; // mark has not been tainted, increase safety counter
450 if (current->currentSafetyCount >= current->stackSafetyCount) { // if the safety counter is above the limit, then
451 if (halfWayMark == current->stackFree) { // check if search already converged, if so increase the limit to find very rare stack usage incidents
452 current->stackSafetyCount = current->currentSafetyCount;
453 } else {
454 current->stackFree = halfWayMark; // otherwise just mark this position as free stack to narrow search
455 current->currentSafetyCount = 0;
461 * Scheduler subtask
462 * \param[in] task The scheduler task in question
463 * \param[in] priority The scheduling priority of the callback to search for
464 * \return wait time until next scheduled callback is due - 0 if a callback has just been executed
466 static int32_t runNextCallback(struct DelayedCallbackTaskStruct *task, DelayedCallbackPriority priority)
468 int32_t result = MAX_SLEEP;
469 int32_t diff = 0;
471 // no such queue
472 if (priority > CALLBACK_PRIORITY_LOW) {
473 return result;
476 // queue is empty, search a lower priority queue
477 if (task->callbackQueue[priority] == NULL) {
478 return runNextCallback(task, priority + 1);
481 DelayedCallbackInfo *current = task->queueCursor[priority];
482 DelayedCallbackInfo *next;
483 do {
484 if (current == NULL) {
485 next = task->callbackQueue[priority]; // loop around the end of the list
486 // also attempt to run a callback that has lower priority
487 // every time the queue is completely traversed
488 diff = runNextCallback(task, priority + 1);
489 if (!diff) {
490 task->queueCursor[priority] = next; // the recursive call has executed a callback
491 return 0;
493 if (diff < result) {
494 result = diff; // adjust sleep time
496 } else {
497 next = current->next;
498 xSemaphoreTakeRecursive(mutex, portMAX_DELAY); // access to scheduletime should be mutex protected
499 if (current->scheduletime) {
500 diff = current->scheduletime - xTaskGetTickCount();
501 if (diff <= 0) {
502 current->waiting = true;
503 } else if (diff < result) {
504 result = diff; // adjust sleep time
507 if (current->waiting) {
508 task->queueCursor[priority] = next;
509 current->scheduletime = 0; // any schedules are reset
510 current->waiting = false; // the flag is reset just before execution.
511 xSemaphoreGiveRecursive(mutex);
513 /* callback gets invoked here - check stack sizes */
514 markStack(current);
516 current->cb(); // call the callback
518 checkStack(current);
520 current->runCount++;
522 return 0;
524 xSemaphoreGiveRecursive(mutex);
526 current = next;
527 } while (current != task->queueCursor[priority]);
528 // once the list has been traversed entirely without finding any to be executed task, abort (nothing to do)
529 return result;
533 * Scheduler task, responsible of invoking callbacks.
534 * \param[in] task The scheduling task being run
536 static void CallbackSchedulerTask(void *task)
538 uint32_t delay = 0;
540 while (1) {
541 delay = runNextCallback((struct DelayedCallbackTaskStruct *)task, CALLBACK_PRIORITY_CRITICAL);
542 if (delay) {
543 // nothing to do but sleep
544 xSemaphoreTake(((struct DelayedCallbackTaskStruct *)task)->signal, delay);
549 #endif // ifdef PIOS_INCLUDE_CALLBACKSCHEDULER