From 28bba1f0666c7e9fff2e700f8c8425544c933a33 Mon Sep 17 00:00:00 2001
From: Nicola Manica
Date: Thu, 18 Jun 2009 15:16:56 +0200
Subject: [PATCH] Port Luca's patch: rt_mutex_setprio is now called task_setprio

---
 include/linux/sched.h |  21 ++
 init/Kconfig          |   5 +
 kernel/sched.c        |  72 +++++-
 kernel/sched_fair.c   |   7 +-
 kernel/sched_rt.c     | 614 ++++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 713 insertions(+), 6 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 983875f0..31120aaf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -38,11 +38,16 @@
 #define SCHED_BATCH		3
 /* SCHED_ISO: reserved but not implemented yet */
 #define SCHED_IDLE		5
+#define SCHED_CBS		6
 
 #ifdef __KERNEL__
 
 struct sched_param {
 	int sched_priority;
+	int sched_ss_low_priority;
+	struct timespec sched_ss_repl_period;
+	struct timespec sched_ss_init_budget;
+	int sched_ss_max_repl;
 };
 
 #include <asm/param.h>	/* for HZ */
diff --git a/init/Kconfig b/init/Kconfig
index d4a4210a..455c5203 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -547,6 +547,11 @@ config PROC_PID_CPUSET
 	depends on CPUSETS
 	default y
 
+config SCHED_CBS
+	bool "CBS Scheduling Class"
+	help
+	  Enable the Constant Bandwidth Server (CBS) scheduling class (policy SCHED_CBS): each task gets a CPU budget every period and is scheduled by deadline.
+
 config CGROUP_CPUACCT
 	bool "Simple CPU accounting cgroup subsystem"
 	depends on CGROUPS
diff --git a/kernel/sched.c b/kernel/sched.c
index be79f673..dd3b285f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -562,6 +562,24 @@ struct rt_rq {
 #endif
 };
 
+#ifdef CONFIG_SCHED_CBS
+struct cbs_rq {
+	unsigned long nr_running;
+
+	u64 min_deadline;
+
+	struct rb_root tasks_timeline;
+	struct rb_node *rb_leftmost;
+
+	/*
+	 * 'curr' points to the currently running entity on this cbs_rq.
+	 * It is set to NULL otherwise (i.e. when none are currently running).
+	 */
+	struct sched_cbs_entity *curr;
+};
+#endif
+
+
 #ifdef CONFIG_SMP
 
 /*
@@ -634,6 +652,9 @@ struct rq {
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
+#ifdef CONFIG_SCHED_CBS
+	struct cbs_rq cbs;
+#endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
@@ -2719,9 +2740,16 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	 * Make sure we do not leak PI boosting priority to the child:
 	 */
 	p->prio = current->normal_prio;
 
-	if (!rt_prio(p->prio))
+	if (!rt_prio(p->prio)) {
 		p->sched_class = &fair_sched_class;
+#ifdef CONFIG_SCHED_CBS
+		if (p->policy == SCHED_CBS)
+			p->sched_class = &cbs_sched_class;
+		else
+#endif
+			p->sched_class = &fair_sched_class;
+	}
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	if (likely(sched_info_on()))
 		memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -5977,9 +6005,14 @@ void task_setprio(struct task_struct *p, int prio)
 
 	if (rt_prio(prio))
 		p->sched_class = &rt_sched_class;
-	else
-		p->sched_class = &fair_sched_class;
-
+	else {
+#ifdef CONFIG_SCHED_CBS
+		if (p->policy == SCHED_CBS)
+			p->sched_class = &cbs_sched_class;
+		else
+#endif
+			p->sched_class = &fair_sched_class;
+	}
 	p->prio = prio;
 
 	trace_sched_task_setprio(rq, p, oldprio);
@@ -6167,6 +6200,11 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 	case SCHED_RR:
 		p->sched_class = &rt_sched_class;
 		break;
+#ifdef CONFIG_SCHED_CBS
+	case SCHED_CBS:
+		p->sched_class = &cbs_sched_class;
+		break;
+#endif
 	}
 
 	p->rt_priority = prio;
@@ -6206,7 +6244,7 @@ recheck:
 	/* double check policy once rq lock held */
 	if (policy < 0)
 		policy = oldpolicy = p->policy;
-	else if (policy != SCHED_FIFO && policy != SCHED_RR &&
+	else if (policy != SCHED_FIFO && policy != SCHED_RR && policy != SCHED_CBS &&
 			policy != SCHED_NORMAL && policy != SCHED_BATCH &&
 			policy != SCHED_IDLE)
 		return -EINVAL;
@@ -6297,6 +6335,16 @@ recheck:
 		p->sched_class->put_prev_task(rq, p);
 
 	oldprio = p->prio;
+
+#ifdef CONFIG_SCHED_CBS
+	if (policy == SCHED_CBS) {	/* FIXME: Move to __setscheduler()? */
+		p->cbs_se.period = timespec_to_ns(&(param->sched_ss_repl_period));
+		p->cbs_se.max_budget = timespec_to_ns(&param->sched_ss_init_budget);
+		p->cbs_se.budget = 0;
+		p->cbs_se.deadline = 0;
+	}
+#endif
+
 	__setscheduler(rq, p, policy, param->sched_priority);
 
 	if (running)
@@ -9351,6 +9399,17 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #endif
 }
 
+#ifdef CONFIG_SCHED_CBS
+static void init_cbs_rq(struct cbs_rq *cbs_rq, struct rq *rq)
+{
+	cbs_rq->tasks_timeline = RB_ROOT;
+	cbs_rq->min_deadline = (u64)(-(1LL << 20));
+}
+#else
+#define init_cbs_rq(...)
+#endif + + #ifdef CONFIG_FAIR_GROUP_SCHED static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *se, int cpu, int add, @@ -9509,6 +9568,9 @@ void __init sched_init(void) rq->calc_load_update = jiffies + LOAD_FREQ; init_cfs_rq(&rq->cfs, rq); init_rt_rq(&rq->rt, rq); +#ifdef CONFIG_SCHED_CBS + init_cbs_rq(&rq->cbs, rq); +#endif #ifdef CONFIG_FAIR_GROUP_SCHED init_task_group.shares = init_task_group_load; INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 3816f217..8a3bdf35 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1436,9 +1436,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) return; } - if (unlikely(p->sched_class != &fair_sched_class)) + if (unlikely(p->sched_class != &fair_sched_class)){ + update_rq_clock(rq); + update_curr(cfs_rq); + resched_task(curr); return; +} + if (unlikely(se == pse)) return; diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index c1ee8dc6..c1d83ec4 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1801,7 +1801,11 @@ static void set_curr_task_rt(struct rq *rq) } static const struct sched_class rt_sched_class = { +#ifdef CONFIG_SCHED_CBS + .next = &cbs_sched_class, +#else .next = &fair_sched_class, +#endif .enqueue_task = enqueue_task_rt, .dequeue_task = dequeue_task_rt, .yield_task = yield_task_rt, @@ -1847,3 +1851,613 @@ static void print_rt_stats(struct seq_file *m, int cpu) } #endif /* CONFIG_SCHED_DEBUG */ + +#ifdef CONFIG_SCHED_CBS +static inline struct task_struct *cbs_task_of(struct sched_cbs_entity *se) +{ + return container_of(se, struct task_struct, cbs_se); +} + +static inline struct rq *cbs_rq_of(struct cbs_rq *cbs_rq) +{ + return container_of(cbs_rq, struct rq, cbs); +} + +#define for_each_cbs_sched_entity(se) \ + for (; se; se = NULL) + +static inline struct cbs_rq *task_cbs_rq(struct task_struct *p) +{ + return &task_rq(p)->cbs; +} + + +static inline struct cbs_rq *cbs_cbs_rq_of(struct sched_cbs_entity *se) +{ + struct task_struct *p = cbs_task_of(se); + struct rq *rq = task_rq(p); + + return &rq->cbs; +} + +/* runqueue "owned" by this group */ +static inline struct cbs_rq *group_cbs_rq(struct sched_cbs_entity *grp) +{ + return NULL; +} + +static inline int +is_same_cbs_group(struct sched_cbs_entity *se, struct sched_cbs_entity *pse) +{ + return 1; +} + +static inline struct sched_cbs_entity *parent_cbs_entity(struct sched_cbs_entity *se) +{ + return NULL; +} + + + +/************************************************************** + * Scheduling class tree data structure manipulation methods: + */ + +static inline u64 max_dl(u64 min_dl, u64 dl) +{ + s64 delta = (s64)(dl - min_dl); + if (delta > 0) + min_dl = dl; + + return min_dl; +} + +static inline u64 min_dl(u64 min_dl, u64 dl) +{ + s64 delta = (s64)(dl - min_dl); + if (delta < 0) + min_dl = dl; + + return min_dl; +} + +static inline void deadline_postpone(struct sched_cbs_entity *cbs_se) +{ + while (cbs_se->budget < 0) { + cbs_se->deadline += cbs_se->period; + cbs_se->budget += cbs_se->max_budget; + } +} + +static inline s64 entity_deadline(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se) +{ + return se->deadline - cbs_rq->min_deadline; +} + +/* + * Enqueue an entity into the rb-tree: + */ +static void __enqueue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se) +{ + struct rb_node **link = &cbs_rq->tasks_timeline.rb_node; + struct rb_node *parent = NULL; + struct sched_cbs_entity *entry; + s64 key = entity_deadline(cbs_rq, 
se); + int leftmost = 1; + + /* + * Find the right place in the rbtree: + */ + while (*link) { + parent = *link; + entry = rb_entry(parent, struct sched_cbs_entity, run_node); + /* + * We dont care about collisions. Nodes with + * the same key stay together. + */ + if (key < entity_deadline(cbs_rq, entry)) { + link = &parent->rb_left; + } else { + link = &parent->rb_right; + leftmost = 0; + } + } + + /* + * Maintain a cache of leftmost tree entries (it is frequently + * used): + */ + if (leftmost) { + cbs_rq->rb_leftmost = &se->run_node; + /* + * maintain cbs_rq->min_deadline to be a monotonic increasing + * value tracking the leftmost deadline in the tree. + */ + cbs_rq->min_deadline = + max_dl(cbs_rq->min_deadline, se->deadline); + } + + rb_link_node(&se->run_node, parent, link); + rb_insert_color(&se->run_node, &cbs_rq->tasks_timeline); +} + +static void __dequeue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se) +{ + if (cbs_rq->rb_leftmost == &se->run_node) { + struct rb_node *next_node; + struct sched_cbs_entity *next; + + next_node = rb_next(&se->run_node); + cbs_rq->rb_leftmost = next_node; + + if (next_node) { + next = rb_entry(next_node, + struct sched_cbs_entity, run_node); + cbs_rq->min_deadline = + max_dl(cbs_rq->min_deadline, + next->deadline); + } + } + + rb_erase(&se->run_node, &cbs_rq->tasks_timeline); +} + +static inline struct rb_node *earliest_deadline(struct cbs_rq *cbs_rq) +{ + return cbs_rq->rb_leftmost; +} + +static struct sched_cbs_entity *__pick_next_cbs_entity(struct cbs_rq *cbs_rq) +{ + return rb_entry(earliest_deadline(cbs_rq), struct sched_cbs_entity, run_node); +} + +/* + * Update the current task's runtime statistics. Skip current tasks that + * are not in our scheduling class. + */ +static inline void +__update_curr_cbs(struct cbs_rq *cbs_rq, struct sched_cbs_entity *curr, + unsigned long delta_exec) +{ + schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max)); + +// curr->sum_exec_runtime += delta_exec; + schedstat_add(cbs_rq, exec_clock, delta_exec); + curr->budget -= delta_exec; + deadline_postpone(curr); +} + +static void update_curr_cbs(struct cbs_rq *cbs_rq) +{ + struct sched_cbs_entity *curr = cbs_rq->curr; + u64 now = cbs_rq_of(cbs_rq)->clock; + unsigned long delta_exec; + + if (unlikely(!curr)) + return; + + /* + * Get the amount of time the current task was running + * since the last accounting time + */ + delta_exec = (unsigned long)(now - curr->exec_start); + + __update_curr_cbs(cbs_rq, curr, delta_exec); + curr->exec_start = now; + +#if 0 + if (entity_is_task(curr)) { + struct task_struct *curtask = cbs_task_of(curr); + + cpuacct_charge(curtask, delta_exec); + } +#endif +} + +/* + * We are picking a new current task - update its stats: + */ +static inline void +update_stats_curr_start_cbs(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se) +{ + /* + * We are starting a new run period: + */ + se->exec_start = cbs_rq_of(cbs_rq)->clock; +} + +/************************************************** + * Scheduling class queueing methods: + */ + +static void +account_cbs_entity_enqueue(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se) +{ + cbs_rq->nr_running++; + se->on_rq = 1; +} + +static void +account_cbs_entity_dequeue(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se) +{ + BUG_ON(se->on_rq == 0); + BUG_ON(cbs_rq->nr_running == 0); + cbs_rq->nr_running--; + se->on_rq = 0; +} + +static void +enqueue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se) +{ + u64 vt, now = cbs_rq_of(cbs_rq)->clock; + + /* + * Update 
run-time statistics of the 'current'. + */ + update_curr_cbs(cbs_rq); + account_cbs_entity_enqueue(cbs_rq, se); + + vt = se->period * se->budget; + do_div(vt, se->max_budget); + + if (vt + now > se->deadline) { + se->budget = se->max_budget; + se->deadline = se->period + now; + } + + if (se != cbs_rq->curr) + __enqueue_cbs_entity(cbs_rq, se); +} + +static void +dequeue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se) +{ + /* + * Update run-time statistics of the 'current'. + */ + update_curr_cbs(cbs_rq); + + if (se != cbs_rq->curr) + __dequeue_cbs_entity(cbs_rq, se); + account_cbs_entity_dequeue(cbs_rq, se); +} + +static void +set_next_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se) +{ + /* 'current' is not kept within the tree. */ + if (se->on_rq) { + __dequeue_cbs_entity(cbs_rq, se); + } + + update_stats_curr_start_cbs(cbs_rq, se); + cbs_rq->curr = se; +// se->prev_sum_exec_runtime = se->sum_exec_runtime; +} + +static int +wakeup_preempt_cbs_entity(struct sched_cbs_entity *curr, struct sched_cbs_entity *se) +{ + return se->deadline < curr->deadline; +} + + +static struct sched_cbs_entity *pick_next_cbs_entity(struct cbs_rq *cbs_rq) +{ + struct sched_cbs_entity *se = NULL; + + if (earliest_deadline(cbs_rq)) { + se = __pick_next_cbs_entity(cbs_rq); + set_next_cbs_entity(cbs_rq, se); + } + + return se; +} + +static void put_prev_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *prev) +{ + /* + * If still on the runqueue then deactivate_task() + * was not called and update_curr() has to be done: + */ + if (prev->on_rq) + update_curr_cbs(cbs_rq); + + if (prev->on_rq) { + /* Put 'current' back into the tree. */ + __enqueue_cbs_entity(cbs_rq, prev); + } + cbs_rq->curr = NULL; +} + +static void +cbs_entity_tick(struct cbs_rq *cbs_rq, struct sched_cbs_entity *curr, int queued) +{ + /* + * Update run-time statistics of the 'current'. + */ + update_curr_cbs(cbs_rq); + + if (cbs_rq->nr_running > 1) + resched_task(cbs_rq_of(cbs_rq)->curr); /* FIXME: Check! */ +} + + +/************************************************** + * CBS operations on tasks: + */ + +#ifdef CONFIG_SCHED_HRTICK +static void hrtick_start_cbs(struct rq *rq, struct task_struct *p) +{ + int requeue = rq->curr == p; + struct sched_cbs_entity *se = &p->cbs_se; + s64 delta; + + WARN_ON(task_rq(p) != rq); + + /* + * Don't schedule timeouts shorter than 10000ns, that just + * doesn't make sense. + */ + delta = max(10000LL, se->budget); + hrtick_start(rq, delta, requeue); +} +#else +static inline void +hrtick_start_cbs(struct rq *rq, struct task_struct *p) +{ +} +#endif + +/* + * The enqueue_task method is called before nr_running is + * increased. Here we update the fair scheduling stats and + * then put the task into the rbtree: + */ +static void enqueue_task_cbs(struct rq *rq, struct task_struct *p, int wakeup) +{ + struct cbs_rq *cbs_rq; + struct sched_cbs_entity *se = &p->cbs_se; + + for_each_cbs_sched_entity(se) { + if (se->on_rq) + break; + cbs_rq = cbs_cbs_rq_of(se); + enqueue_cbs_entity(cbs_rq, se); + } + + hrtick_start_cbs(rq, rq->curr); +} + +/* + * The dequeue_task method is called before nr_running is + * decreased. 
We remove the task from the rbtree and + * update the fair scheduling stats: + */ +static void dequeue_task_cbs(struct rq *rq, struct task_struct *p, int sleep) +{ + struct cbs_rq *cbs_rq; + struct sched_cbs_entity *se = &p->cbs_se; + + for_each_cbs_sched_entity(se) { + cbs_rq = cbs_cbs_rq_of(se); + dequeue_cbs_entity(cbs_rq, se); + /* FIXME: Don't dequeue parent if it has other entities besides us */ + } + + hrtick_start_cbs(rq, rq->curr); +} + +/* + * sched_yield() is broken on CBS. + * + * If compat_yield is turned on then we requeue to the end of the tree. + */ +static void yield_task_cbs(struct rq *rq) +{ +} + +/* return depth at which a sched entity is present in the hierarchy */ +static inline int depth_se_cbs(struct sched_cbs_entity *se) +{ + int depth = 0; + + for_each_cbs_sched_entity(se) + depth++; + + return depth; +} + +/* + * Preempt the current task with a newly woken task if needed: + */ +static void check_preempt_wakeup_cbs(struct rq *rq, struct task_struct *p) +{ + struct task_struct *curr = rq->curr; + struct cbs_rq *cbs_rq = task_cbs_rq(curr); + struct sched_cbs_entity *se = &curr->cbs_se, *pse = &p->cbs_se; +#if 0 + int se_depth, pse_depth; +#endif + if (unlikely(rt_prio(p->prio))) { + update_rq_clock(rq); + update_curr_cbs(cbs_rq); + resched_task(curr); + return; + } + +// se->last_wakeup = se->sum_exec_runtime; + if (unlikely(se == pse)) + return; + +#if 0 +/* + * preemption test can be made between sibling entities who are in the + * same cbs_rq i.e who have a common parent. Walk up the hierarchy of + * both tasks until we find their ancestors who are siblings of common + * parent. + */ + + /* First walk up until both entities are at same depth */ + se_depth = depth_se_cbs(se); + pse_depth = depth_se_cbs(pse); + + while (se_depth > pse_depth) { + se_depth--; + se = parent_cbs_entity(se); + } + + while (pse_depth > se_depth) { + pse_depth--; + pse = parent_cbs_entity(pse); + } + + while (!is_same_cbs_group(se, pse)) { + se = parent_cbs_entity(se); + pse = parent_cbs_entity(pse); + } +#endif + if (wakeup_preempt_cbs_entity(se, pse) == 1) + resched_task(curr); +} + +static struct task_struct *pick_next_task_cbs(struct rq *rq) +{ + struct task_struct *p; + struct cbs_rq *cbs_rq = &rq->cbs; + struct sched_cbs_entity *se; + + if (unlikely(!cbs_rq->nr_running)) + return NULL; + + do { + se = pick_next_cbs_entity(cbs_rq); + cbs_rq = group_cbs_rq(se); + } while (cbs_rq); + + p = cbs_task_of(se); + hrtick_start_cbs(rq, p); + + return p; +} + +/* + * Account for a descheduled task: + */ +static void put_prev_task_cbs(struct rq *rq, struct task_struct *prev) +{ + struct sched_cbs_entity *se = &prev->cbs_se; + struct cbs_rq *cbs_rq; + + for_each_cbs_sched_entity(se) { + cbs_rq = cbs_cbs_rq_of(se); + put_prev_cbs_entity(cbs_rq, se); + } +} + +/* + * scheduler tick hitting a task of our scheduling class: + */ +static void task_tick_cbs(struct rq *rq, struct task_struct *curr, int queued) +{ + struct cbs_rq *cbs_rq; + struct sched_cbs_entity *se = &curr->cbs_se; + + for_each_cbs_sched_entity(se) { + cbs_rq = cbs_cbs_rq_of(se); + cbs_entity_tick(cbs_rq, se, queued); + } +} + +/* + * FIXME! + */ +static void task_new_cbs(struct rq *rq, struct task_struct *p) +{ +#warning Task New CBS is W R O N G ! ! ! + struct cbs_rq *cbs_rq = task_cbs_rq(p); + printk("task_new_cbs has been called!\n"); + sched_info_queued(p); + + update_curr_cbs(cbs_rq); + + enqueue_task_cbs(rq, p, 0); + resched_task(rq->curr); +} + +/* + * Priority of the task has changed. 
Check to see if we preempt + * the current task. + */ +static void prio_changed_cbs(struct rq *rq, struct task_struct *p, + int oldprio, int running) +{ +#warning Check prio_changed_cbs() implementation, thanks! + printk("prio_changed_cbs has been called!\n"); + check_preempt_curr(rq, p); +} + +/* + * We switched to the sched_cbs class. + */ +static void switched_to_cbs(struct rq *rq, struct task_struct *p, + int running) +{ +#warning Check switched_to_cbs() implementation, thanks! + //printk("switched_to_cbs has been called!\n"); + check_preempt_curr(rq, p); +} + +/* Account for a task changing its policy or group. + * + * This routine is mostly called to set cbs_rq->curr field when a task + * migrates between groups/classes. + */ +static void set_curr_task_cbs(struct rq *rq) +{ + struct sched_cbs_entity *se = &rq->curr->cbs_se; + + for_each_cbs_sched_entity(se) + set_next_cbs_entity(cbs_cbs_rq_of(se), se); +} + +/* + * All the scheduling class methods: + */ +static const struct sched_class cbs_sched_class = { + .next = &fair_sched_class, + .enqueue_task = enqueue_task_cbs, + .dequeue_task = dequeue_task_cbs, + .yield_task = yield_task_cbs, +#ifdef CONFIG_SMP +#error CBS SMP is still a No-No! + .select_task_rq = , +#endif /* CONFIG_SMP */ + + .check_preempt_curr = check_preempt_wakeup_cbs, + + .pick_next_task = pick_next_task_cbs, + .put_prev_task = put_prev_task_cbs, + +#ifdef CONFIG_SMP +#error CBS SMP is still a No-No! + .load_balance = , + .move_one_task = , +#endif + + .set_curr_task = set_curr_task_cbs, + .task_tick = task_tick_cbs, + .task_new = task_new_cbs, + + .prio_changed = prio_changed_cbs, + .switched_to = switched_to_cbs, + +#ifdef CONFIG_CBS_GROUP_SCHED + .moved_group = , +#endif +}; + +#endif + -- 2.11.4.GIT
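
Note for testers: below is a minimal userspace sketch of how a task could request the new SCHED_CBS policy added above. It is not part of the patch and makes a few assumptions: it mirrors the extended struct sched_param locally (under the made-up name cbs_sched_param) and invokes the raw syscall, since the C library's <sched.h> knows nothing about the new sched_ss_* fields, and the 10 ms budget / 100 ms period reservation is just an example value.

/* cbs_test.c - illustrative only; assumes a kernel built with CONFIG_SCHED_CBS
 * and the patched include/linux/sched.h above. */
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

#define SCHED_CBS	6	/* policy value introduced by this patch */

/* Local mirror of the patched struct sched_param (layout must match). */
struct cbs_sched_param {
	int sched_priority;
	int sched_ss_low_priority;
	struct timespec sched_ss_repl_period;
	struct timespec sched_ss_init_budget;
	int sched_ss_max_repl;
};

int main(void)
{
	struct cbs_sched_param p;

	memset(&p, 0, sizeof(p));	/* sched_priority must be 0 for a non-RT policy */
	/* Example reservation: 10 ms of budget every 100 ms. */
	p.sched_ss_init_budget.tv_nsec = 10 * 1000 * 1000;
	p.sched_ss_repl_period.tv_nsec = 100 * 1000 * 1000;

	if (syscall(SYS_sched_setscheduler, getpid(), SCHED_CBS, &p) < 0) {
		perror("sched_setscheduler(SCHED_CBS)");
		return 1;
	}

	/* Periodic work would run here under the CBS reservation. */
	return 0;
}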
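
Reviewer note on the algorithm: enqueue_cbs_entity() above implements the usual CBS wakeup test. The remaining budget is scaled by period/max_budget and, if that amount of time does not fit before the current deadline, the entity receives a full budget and a fresh deadline one period in the future; deadline_postpone() then keeps budget and deadline consistent while the task consumes CPU time. A small standalone model of that arithmetic (plain C, stdint types instead of the kernel's u64/s64, made-up numbers) that can be compiled and stepped through:

/* cbs_model.c - standalone model of the wakeup rule in enqueue_cbs_entity().
 * All values are in nanoseconds; the budget is assumed non-negative at wakeup. */
#include <stdint.h>
#include <stdio.h>

struct cbs {
	uint64_t max_budget;	/* Q: budget granted per period */
	uint64_t period;	/* T: replenishment period */
	int64_t  budget;	/* c: remaining budget */
	uint64_t deadline;	/* d: absolute scheduling deadline */
};

static void cbs_wakeup(struct cbs *s, uint64_t now)
{
	/* vt = c * T / Q, as computed with do_div() in the patch. */
	uint64_t vt = (uint64_t)s->budget * s->period / s->max_budget;

	if (vt + now > s->deadline) {
		/* The old (budget, deadline) pair is unusable: start a new server period. */
		s->budget = s->max_budget;
		s->deadline = now + s->period;
	}
}

int main(void)
{
	/* 10 ms budget every 100 ms, woken up well past its old deadline. */
	struct cbs s = { .max_budget = 10000000ULL, .period = 100000000ULL,
			 .budget = 4000000, .deadline = 150000000ULL };

	cbs_wakeup(&s, 200000000ULL);
	printf("budget=%lld deadline=%llu\n",
	       (long long)s.budget, (unsigned long long)s.deadline);
	return 0;
}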