static inline struct task_struct *cbs_task_of(struct sched_cbs_entity *se)
{
	return container_of(se, struct task_struct, cbs_se);
}

static inline struct rq *cbs_rq_of(struct cbs_rq *cbs_rq)
{
	return container_of(cbs_rq, struct rq, cbs);
}
/* Flat (non-grouped) hierarchy: the walk visits the entity itself only. */
#define for_each_cbs_sched_entity(se) \
		for (; se; se = NULL)
static inline struct cbs_rq *task_cbs_rq(struct task_struct *p)
{
	return &task_rq(p)->cbs;
}
static inline struct cbs_rq *cbs_cbs_rq_of(struct sched_cbs_entity *se)
{
	struct task_struct *p = cbs_task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cbs;
}
/* runqueue "owned" by this group */
static inline struct cbs_rq *group_cbs_rq(struct sched_cbs_entity *grp)
{
	return NULL;
}
static inline int
is_same_cbs_group(struct sched_cbs_entity *se, struct sched_cbs_entity *pse)
{
	return 1;
}
static inline struct sched_cbs_entity *parent_cbs_entity(struct sched_cbs_entity *se)
{
	return NULL;
}
/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */
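/*
 * Deadlines are compared through a signed difference so that the result
 * stays correct even when the u64 clock values wrap around.
 */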
static inline u64 max_dl(u64 min_dl, u64 dl)
{
	s64 delta = (s64)(dl - min_dl);
	if (delta > 0)
		min_dl = dl;

	return min_dl;
}
static inline u64 min_dl(u64 min_dl, u64 dl)
{
	s64 delta = (s64)(dl - min_dl);
	if (delta < 0)
		min_dl = dl;

	return min_dl;
}
static inline void deadline_postpone(struct sched_cbs_entity *cbs_se);
static void __enqueue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se);
static void cbs_deadline(struct sched_cbs_entity *se)
{
	struct cbs_rq *cbs_rq = cbs_cbs_rq_of(se);

	printk("cbs_deadline!\n");
	deadline_postpone(se);
	if (se != cbs_rq->curr)
		__enqueue_cbs_entity(cbs_rq, se);
}
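/*
 * cbs_end_deadline() below is the hrtimer callback for the hard-CBS case:
 * once the postponed deadline is reached, the budget can be recharged and
 * the task put back into the rbtree (what cbs_deadline() above does); the
 * hookup is still incomplete, see the comments inside.
 */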
static enum hrtimer_restart cbs_end_deadline(struct hrtimer *timer)
{
	struct sched_cbs_entity *se = container_of(timer, struct sched_cbs_entity,
						   cbs_deadline_timer);

	printk("timer has fired at deadline\n");
	//WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	//spin_lock(&rq->lock);
	//update_rq_clock(rq);
	/*
	 * A function is needed here that inserts the task back into the
	 * rbtree and recharges the budget (postpone_deadline + enqueue_task).
	 * Question: are the parameters correct here?
	 * Does rq->curr point to the cbs task that owns the expired timer?
	 */
	//rq->curr->sched_class->end_(rq, rq->curr, 1);
	//spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}
static void init_cbs_timer(struct sched_cbs_entity *cbs_se)
{
	hrtimer_init(&cbs_se->cbs_deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	cbs_se->cbs_deadline_timer.function = cbs_end_deadline;
	cbs_se->cbs_deadline_timer.irqsafe = 1;
}
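/*
 * A minimal sketch (not part of the original patch): how the CBS fields used
 * throughout this file could be initialised before a task is first enqueued.
 * The helper name and the setup path are hypothetical.
 */
static inline void init_cbs_entity(struct sched_cbs_entity *se,
				   u64 max_budget_ns, u64 period_ns)
{
	se->max_budget = max_budget_ns;	/* runtime reserved per period */
	se->period = period_ns;		/* server period */
	se->budget = max_budget_ns;	/* start with a full budget */
	se->deadline = 0;		/* assigned on first activation */
	se->on_rq = 0;
	init_cbs_timer(se);		/* prepare the deadline timer */
}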
static void start_cbs_timer(struct sched_cbs_entity *cbs_se)
{
	struct cbs_rq *cbs_rq = cbs_cbs_rq_of(cbs_se);
	ktime_t hard;
	u64 now;

	/* if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return; */

	if (hrtimer_active(&cbs_se->cbs_deadline_timer))
		return;

	printk("DEAD set to %llu\n", (unsigned long long)cbs_se->deadline);
	printk("PERIOD %llu\n", (unsigned long long)cbs_se->period);
	spin_lock(&cbs_se->cbs_sleeptime_lock);
	for (;;) {
		if (hrtimer_active(&cbs_se->cbs_deadline_timer))
			break;

		now = cbs_rq_of(cbs_rq)->clock;
		//now = hrtimer_cb_get_time(&cbs_se->cbs_deadline_timer);
		if (cbs_se->deadline == 0)
			cbs_se->deadline = now + cbs_se->period;
		// hrtimer_forward(&cbs_se->cbs_deadline_timer, now, ns_to_ktime(cbs_se->period));

		//soft = hrtimer_get_softexpires(&cbs_se->cbs_deadline_timer);
		//hard = hrtimer_get_expires(&cbs_se->cbs_deadline_timer);
		//delta = ktime_to_ns(ktime_sub(hard, soft));
		hard = ns_to_ktime(cbs_se->deadline);
		//__hrtimer_start_range_ns(&cbs_se->cbs_deadline_timer, soft, delta, HRTIMER_MODE_ABS, 0);
		hrtimer_start(&cbs_se->cbs_deadline_timer, hard, HRTIMER_MODE_ABS);
	}
	spin_unlock(&cbs_se->cbs_sleeptime_lock);
}
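/*
 * CBS replenishment rule: on each overrun the deadline is pushed one period
 * ahead and the budget recharged by one max_budget, repeating if the overrun
 * spans several periods; the reserved bandwidth max_budget/period is thus
 * preserved.
 */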
static inline void deadline_postpone(struct sched_cbs_entity *cbs_se)
{
	while (cbs_se->budget < 0) {
		cbs_se->deadline += cbs_se->period;
		cbs_se->budget += cbs_se->max_budget;
	}
}
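/*
 * Example: with max_budget = 10ms and period = 100ms, a budget of -3ms after
 * an overrun becomes 7ms, with the deadline postponed by 100ms, in a single
 * iteration.
 */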
static inline s64 entity_deadline(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	return se->deadline - cbs_rq->min_deadline;
}
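/*
 * The rbtree below is keyed on deadlines relative to cbs_rq->min_deadline;
 * keeping the keys relative bounds their magnitude even though absolute
 * deadlines grow monotonically.
 */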
/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	struct rb_node **link = &cbs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_cbs_entity *entry;
	s64 key = entity_deadline(cbs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_cbs_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_deadline(cbs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost) {
		cbs_rq->rb_leftmost = &se->run_node;
		/*
		 * maintain cbs_rq->min_deadline to be a monotonic increasing
		 * value tracking the leftmost deadline in the tree.
		 */
		cbs_rq->min_deadline =
			max_dl(cbs_rq->min_deadline, se->deadline);
	}

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cbs_rq->tasks_timeline);
}
static void __dequeue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	if (cbs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;
		struct sched_cbs_entity *next;

		next_node = rb_next(&se->run_node);
		cbs_rq->rb_leftmost = next_node;

		if (next_node) {
			next = rb_entry(next_node,
					struct sched_cbs_entity, run_node);
			cbs_rq->min_deadline =
				max_dl(cbs_rq->min_deadline,
				       next->deadline);
		}
	}

	rb_erase(&se->run_node, &cbs_rq->tasks_timeline);
}
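/*
 * When the cached leftmost node is removed, min_deadline advances to the
 * new leftmost deadline, so the relative keys computed by entity_deadline()
 * stay non-negative for queued entities.
 */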
static inline struct rb_node *earliest_deadline(struct cbs_rq *cbs_rq)
{
	return cbs_rq->rb_leftmost;
}

static struct sched_cbs_entity *__pick_next_cbs_entity(struct cbs_rq *cbs_rq)
{
	return rb_entry(earliest_deadline(cbs_rq), struct sched_cbs_entity, run_node);
}
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr_cbs(struct cbs_rq *cbs_rq, struct sched_cbs_entity *curr,
		  unsigned long delta_exec)
{
	//schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	// curr->sum_exec_runtime += delta_exec;
	//schedstat_add(cbs_rq, exec_clock, delta_exec);
	curr->budget -= delta_exec;

	/* Soft CBS: if the budget runs out, recharge it and postpone the deadline. */
	/* Hard CBS: if the budget runs out, start a timer that fires at the deadline. */
	if (curr->budget <= 0) {
#ifdef CONFIG_CBS_HARD	/* hypothetical switch: the original guard is lost */
		start_cbs_timer(curr);
#else
		deadline_postpone(curr);
#endif
	}
}
static void update_curr_cbs(struct cbs_rq *cbs_rq)
{
	//printk("cbs_rq %p\n", cbs_rq);
	struct sched_cbs_entity *curr = cbs_rq->curr;
	u64 now = cbs_rq_of(cbs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last accounting time
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr_cbs(cbs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = cbs_task_of(curr);

		cpuacct_charge(curtask, delta_exec);
	}
}
/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start_cbs(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = cbs_rq_of(cbs_rq)->clock;
}
/**************************************************
 * Scheduling class queueing methods:
 */
static void
account_cbs_entity_enqueue(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	cbs_rq->nr_running++;
	se->on_rq = 1;
}
static void
account_cbs_entity_dequeue(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	BUG_ON(se->on_rq == 0);
	BUG_ON(cbs_rq->nr_running == 0);
	cbs_rq->nr_running--;
	se->on_rq = 0;
}
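/*
 * CBS wakeup rule, implemented below: a waking task keeps its current
 * (budget, deadline) pair only if budget/max_budget <= (deadline - now)/period,
 * i.e. the bandwidth still owed does not exceed the reserved one; otherwise
 * the budget is refilled and the deadline reset to now + period.
 */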
static void
enqueue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	u64 vt, now = cbs_rq_of(cbs_rq)->clock;

	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr_cbs(cbs_rq);
	account_cbs_entity_enqueue(cbs_rq, se);

	vt = se->period * se->budget;
	do_div(vt, se->max_budget);
	printk("dead %llu and budget %lld\n",
	       (unsigned long long)se->deadline, (long long)se->budget);
	if (vt + now > se->deadline) {
		se->budget = se->max_budget;
		se->deadline = se->period + now;
	}
	printk("dead %llu and budget %lld\n",
	       (unsigned long long)se->deadline, (long long)se->budget);

	/* Hard CBS: do not enqueue the task while its budget is depleted. */
	/* Soft CBS: always enqueue, because the budget can never stay <= 0. */
#ifndef CONFIG_CBS_HARD	/* hypothetical switch: the original guard is lost */
	if (se != cbs_rq->curr)
#else
	if ((se != cbs_rq->curr) && (se->budget > 0))
#endif
		__enqueue_cbs_entity(cbs_rq, se);
}
static void
dequeue_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr_cbs(cbs_rq);

	if (se != cbs_rq->curr)
		__dequeue_cbs_entity(cbs_rq, se);
	account_cbs_entity_dequeue(cbs_rq, se);
}
static void
set_next_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq)
		__dequeue_cbs_entity(cbs_rq, se);
	update_stats_curr_start_cbs(cbs_rq, se);
	cbs_rq->curr = se;
	// se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
static int
wakeup_preempt_cbs_entity(struct sched_cbs_entity *curr, struct sched_cbs_entity *se)
{
	return se->deadline < curr->deadline;
}
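/* Preemption among CBS entities is plain EDF: the earlier deadline wins. */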
static struct sched_cbs_entity *pick_next_cbs_entity(struct cbs_rq *cbs_rq)
{
	struct sched_cbs_entity *se = NULL;

	if (earliest_deadline(cbs_rq)) {
		se = __pick_next_cbs_entity(cbs_rq);
		set_next_cbs_entity(cbs_rq, se);
	}

	return se;
}
static void put_prev_cbs_entity(struct cbs_rq *cbs_rq, struct sched_cbs_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr_cbs(cbs_rq);

	if (prev->on_rq) {
		/* Put 'current' back into the tree. */
		__enqueue_cbs_entity(cbs_rq, prev);
	}
	cbs_rq->curr = NULL;
}
static void
cbs_entity_tick(struct cbs_rq *cbs_rq, struct sched_cbs_entity *curr, int queued)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr_cbs(cbs_rq);

	if (cbs_rq->nr_running > 1)
		resched_task(cbs_rq_of(cbs_rq)->curr); /* FIXME: Check! */
}
/**************************************************
 * CBS operations on tasks:
 */
#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_cbs(struct rq *rq, struct task_struct *p)
{
	// int requeue = rq->curr == p;
	struct sched_cbs_entity *se = &p->cbs_se;
	s64 delta;

	WARN_ON(task_rq(p) != rq);

	/*
	 * Don't schedule timeouts shorter than 10000ns, that just
	 * doesn't make sense.
	 */
	delta = max(10000LL, se->budget);
	hrtick_start(rq, delta);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_cbs(struct rq *rq, struct task_struct *p)
{
}
#endif
/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_cbs(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cbs_rq *cbs_rq;
	struct sched_cbs_entity *se = &p->cbs_se;

	for_each_cbs_sched_entity(se) {
		if (se->on_rq)
			break;
		cbs_rq = cbs_cbs_rq_of(se);
		enqueue_cbs_entity(cbs_rq, se);
	}

	hrtick_start_cbs(rq, rq->curr);
}
/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_cbs(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cbs_rq *cbs_rq;
	struct sched_cbs_entity *se = &p->cbs_se;

	for_each_cbs_sched_entity(se) {
		cbs_rq = cbs_cbs_rq_of(se);
		dequeue_cbs_entity(cbs_rq, se);
		/* FIXME: Don't dequeue parent if it has other entities besides us */
	}

	hrtick_start_cbs(rq, rq->curr);
}
/*
 * sched_yield() is broken on CBS.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_cbs(struct rq *rq)
{
}
/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se_cbs(struct sched_cbs_entity *se)
{
	int depth = 0;

	for_each_cbs_sched_entity(se)
		depth++;

	return depth;
}
/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup_cbs(struct rq *rq, struct task_struct *p, int sync)
{
	struct task_struct *curr = rq->curr;
	struct cbs_rq *cbs_rq = task_cbs_rq(curr);
	struct sched_cbs_entity *se = &curr->cbs_se, *pse = &p->cbs_se;
	int se_depth, pse_depth;

	if (unlikely(rt_prio(p->prio))) {
		update_curr_cbs(cbs_rq);
		resched_task(curr);
		return;
	}

	// se->last_wakeup = se->sum_exec_runtime;
	if (unlikely(se == pse))
		return;

	/*
	 * preemption test can be made between sibling entities who are in the
	 * same cbs_rq i.e who have a common parent. Walk up the hierarchy of
	 * both tasks until we find their ancestors who are siblings of common
	 * parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se_cbs(se);
	pse_depth = depth_se_cbs(pse);

	while (se_depth > pse_depth) {
		se_depth--;
		se = parent_cbs_entity(se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		pse = parent_cbs_entity(pse);
	}

	while (!is_same_cbs_group(se, pse)) {
		se = parent_cbs_entity(se);
		pse = parent_cbs_entity(pse);
	}

	if (wakeup_preempt_cbs_entity(se, pse) == 1)
		resched_task(curr);
}
static struct task_struct *pick_next_task_cbs(struct rq *rq)
{
	struct task_struct *p;
	struct cbs_rq *cbs_rq = &rq->cbs;
	struct sched_cbs_entity *se;

	if (unlikely(!cbs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_cbs_entity(cbs_rq);
		cbs_rq = group_cbs_rq(se);
	} while (cbs_rq);

	p = cbs_task_of(se);
	hrtick_start_cbs(rq, p);

	return p;
}
/*
 * Account for a descheduled task:
 */
static void put_prev_task_cbs(struct rq *rq, struct task_struct *prev)
{
	struct sched_cbs_entity *se = &prev->cbs_se;
	struct cbs_rq *cbs_rq;

	for_each_cbs_sched_entity(se) {
		cbs_rq = cbs_cbs_rq_of(se);
		put_prev_cbs_entity(cbs_rq, se);
	}
}
/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_cbs(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cbs_rq *cbs_rq;
	struct sched_cbs_entity *se = &curr->cbs_se;

	for_each_cbs_sched_entity(se) {
		cbs_rq = cbs_cbs_rq_of(se);
		cbs_entity_tick(cbs_rq, se, queued);
	}
}
static void task_new_cbs(struct rq *rq, struct task_struct *p)
{
	//#warning Task New CBS is W R O N G ! ! !
	struct cbs_rq *cbs_rq = task_cbs_rq(p);

	printk("task_new_cbs has been called!\n");
	sched_info_queued(p);

	update_curr_cbs(cbs_rq);

	init_cbs_timer(cbs_rq->curr);

	enqueue_task_cbs(rq, p, 0);
	resched_task(rq->curr);
}
/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void prio_changed_cbs(struct rq *rq, struct task_struct *p,
			     int oldprio, int running)
{
	//#warning Check prio_changed_cbs() implementation, thanks!
	printk("prio_changed_cbs has been called!\n");
	check_preempt_curr(rq, p, running);
}
/*
 * We switched to the sched_cbs class.
 */
static void switched_to_cbs(struct rq *rq, struct task_struct *p,
			    int running)
{
	//#warning Check switched_to_cbs() implementation, thanks!
	//printk("switched_to_cbs has been called!\n");
	check_preempt_curr(rq, p, running);
}
/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cbs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_cbs(struct rq *rq)
{
	struct sched_cbs_entity *se = &rq->curr->cbs_se;

	for_each_cbs_sched_entity(se)
		set_next_cbs_entity(cbs_cbs_rq_of(se), se);
}
/*
 * All the scheduling class methods:
 */
static const struct sched_class cbs_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_cbs,
	.dequeue_task		= dequeue_task_cbs,
	.yield_task		= yield_task_cbs,
#ifdef CONFIG_SMP
	//#error CBS SMP is still a No-No!
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_wakeup_cbs,

	.pick_next_task		= pick_next_task_cbs,
	.put_prev_task		= put_prev_task_cbs,
#ifdef CONFIG_SMP
	//#error CBS SMP is still a No-No!
#endif /* CONFIG_SMP */

	.set_curr_task		= set_curr_task_cbs,
	.task_tick		= task_tick_cbs,
	.task_new		= task_new_cbs,

	.prio_changed		= prio_changed_cbs,
	.switched_to		= switched_to_cbs,
};
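/*
 * Note: with .next = &fair_sched_class, CBS entities are considered before
 * fair-class ones in the pick loop; for this to take effect the class above
 * (presumably rt_sched_class) must point its .next at cbs_sched_class.
 */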
#ifdef CONFIG_CBS_GROUP_SCHED