/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A demo sched_ext flattened cgroup hierarchy scheduler. It implements
 * hierarchical weight-based cgroup CPU control by flattening the cgroup
 * hierarchy into a single layer by compounding the active weight share at each
 * level. Consider the following hierarchy with weights in parentheses:
 *
 * R + A (100) + B (100)
 *   |         |
 *   |         + C (100)
 *   |
 *   + D (200)
 *
 * Ignoring the root and threaded cgroups, only B, C and D can contain tasks.
 * Let's say all three have runnable tasks. The total share that each of these
 * three cgroups is entitled to can be calculated by compounding its share at
 * each level.
 *
 * For example, B is competing against C and in that competition its share is
 * 100/(100+100) == 1/2. At its parent level, A is competing against D and A's
 * share in that competition is 100/(200+100) == 1/3. B's eventual share in the
 * system can be calculated by multiplying the two shares, 1/2 * 1/3 == 1/6. C's
 * eventual share is the same at 1/6. D is only competing at the top level and
 * its share is 200/(100+200) == 2/3.
 *
 * So, instead of hierarchically scheduling level-by-level, we can consider it
 * as B, C and D competing with each other with respective shares of 1/6, 1/6
 * and 2/3, and keep updating the eventual shares as the cgroups' runnable
 * states change.
 *
 * This flattening of the hierarchy can bring a substantial performance gain
 * when the cgroup hierarchy is nested multiple levels deep. In a simple
 * benchmark using wrk[8] on apache serving a CGI script calculating sha1sum of
 * a small file, it outperforms CFS by ~3% with the CPU controller disabled and
 * by ~10% with two apache instances competing with a 2:1 weight ratio nested
 * four levels deep.
 *
 * However, the gain comes at the cost of not being able to properly handle a
 * thundering herd of cgroups. For example, if many cgroups which are nested
 * behind a low-priority parent cgroup wake up around the same time, they may
 * be able to consume more CPU cycles than they are entitled to. In many use
 * cases, this isn't a real concern, especially given the performance gain.
 * Also, there are ways to mitigate the problem further by, e.g., introducing
 * an extra scheduling layer on cgroup delegation boundaries.
 *
 * The scheduler first picks the cgroup to run and then schedules the tasks
 * within it, by default using nested weighted vtime scheduling. The
 * cgroup-internal scheduling can be switched to FIFO with the -f option.
 */
#include <scx/common.bpf.h>
#include "scx_flatcg.h"
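
/*
 * Illustrative sketch only, not used by the scheduler: compound a cgroup's
 * share down one level as described in the comment above. The helper and its
 * 1e6 fixed-point scale are hypothetical; the real computation operates on
 * hweights in cgrp_refresh_hweight() below. With the example hierarchy, A's
 * share is example_compound_share(1000000, 100, 300) == 333333 (~1/3) and B's
 * is example_compound_share(333333, 100, 200) == 166666 (~1/6).
 */
static __always_inline u64 example_compound_share(u64 parent_share, u64 weight,
						  u64 level_weight_sum)
{
	/* share at this level == own weight / sum of all competing weights */
	return parent_share * weight / level_weight_sum;
}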

/*
 * Maximum amount of retries to find a valid cgroup.
 */
enum {
	FALLBACK_DSQ		= 0,
	CGROUP_MAX_RETRIES	= 1024,
};

char _license[] SEC("license") = "GPL";

const volatile u32 nr_cpus = 32;	/* !0 for veristat, set during init */
const volatile u64 cgrp_slice_ns = SCX_SLICE_DFL;
const volatile bool fifo_sched;

u64 cvtime_now;
UEI_DEFINE(uei);

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, FCG_NR_STATS);
} stats SEC(".maps");

static void stat_inc(enum fcg_stat_idx idx)
{
	u32 idx_v = idx;

	u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx_v);
	if (cnt_p)
		(*cnt_p)++;
}

struct fcg_cpu_ctx {
	u64			cur_cgid;
	u64			cur_at;
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct fcg_cpu_ctx);
	__uint(max_entries, 1);
} cpu_ctx SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct fcg_cgrp_ctx);
} cgrp_ctx SEC(".maps");

struct cgv_node {
	struct bpf_rb_node	rb_node;
	__u64			cvtime;
	__u64			cgid;
};

private(CGV_TREE) struct bpf_spin_lock cgv_tree_lock;
private(CGV_TREE) struct bpf_rb_root cgv_tree __contains(cgv_node, rb_node);

struct cgv_node_stash {
	struct cgv_node __kptr	*node;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 16384);
	__type(key, __u64);
	__type(value, struct cgv_node_stash);
} cgv_node_stash SEC(".maps");

struct fcg_task_ctx {
	u64			bypassed_at;
};

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct fcg_task_ctx);
} task_ctx SEC(".maps");

/* gets inc'd on weight tree changes to expire the cached hweights */
static u64 hweight_gen = 1;

static u64 div_round_up(u64 dividend, u64 divisor)
{
	return (dividend + divisor - 1) / divisor;
}

static bool vtime_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct cgv_node *cgc_a, *cgc_b;

	cgc_a = container_of(a, struct cgv_node, rb_node);
	cgc_b = container_of(b, struct cgv_node, rb_node);

	return cgc_a->cvtime < cgc_b->cvtime;
}

static struct fcg_cpu_ctx *find_cpu_ctx(void)
{
	struct fcg_cpu_ctx *cpuc;
	u32 idx = 0;

	cpuc = bpf_map_lookup_elem(&cpu_ctx, &idx);
	if (!cpuc) {
		scx_bpf_error("cpu_ctx lookup failed");
		return NULL;
	}
	return cpuc;
}

static struct fcg_cgrp_ctx *find_cgrp_ctx(struct cgroup *cgrp)
{
	struct fcg_cgrp_ctx *cgc;

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
	if (!cgc) {
		scx_bpf_error("cgrp_ctx lookup failed for cgid %llu", cgrp->kn->id);
		return NULL;
	}
	return cgc;
}

static struct fcg_cgrp_ctx *find_ancestor_cgrp_ctx(struct cgroup *cgrp, int level)
{
	struct fcg_cgrp_ctx *cgc;

	cgrp = bpf_cgroup_ancestor(cgrp, level);
	if (!cgrp) {
		scx_bpf_error("ancestor cgroup lookup failed");
		return NULL;
	}

	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		scx_bpf_error("ancestor cgrp_ctx lookup failed");
	bpf_cgroup_release(cgrp);
	return cgc;
}

static void cgrp_refresh_hweight(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
{
	int level;

	if (!cgc->nr_active) {
		stat_inc(FCG_STAT_HWT_SKIP);
		return;
	}

	if (cgc->hweight_gen == hweight_gen) {
		stat_inc(FCG_STAT_HWT_CACHE);
		return;
	}

	stat_inc(FCG_STAT_HWT_UPDATES);
	bpf_for(level, 0, cgrp->level + 1) {
		struct fcg_cgrp_ctx *cgc;
		bool is_active;

		cgc = find_ancestor_cgrp_ctx(cgrp, level);
		if (!cgc)
			break;

		if (!level) {
			cgc->hweight = FCG_HWEIGHT_ONE;
			cgc->hweight_gen = hweight_gen;
		} else {
			struct fcg_cgrp_ctx *pcgc;

			pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
			if (!pcgc)
				break;

			/*
			 * We can be opportunistic here and not grab the
			 * cgv_tree_lock and deal with the occasional races.
			 * However, hweight updates are already cached and
			 * relatively low-frequency. Let's just do the
			 * straightforward thing.
			 */
			bpf_spin_lock(&cgv_tree_lock);
			is_active = cgc->nr_active;
			if (is_active) {
				cgc->hweight_gen = pcgc->hweight_gen;
				cgc->hweight =
					div_round_up(pcgc->hweight * cgc->weight,
						     pcgc->child_weight_sum);
			}
			bpf_spin_unlock(&cgv_tree_lock);

			if (!is_active) {
				stat_inc(FCG_STAT_HWT_RACE);
				break;
			}
		}
	}
}

static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc)
{
	u64 delta, cvtime, max_budget;

	/*
	 * A node which is on the rbtree can't be pointed to from elsewhere yet
	 * and thus can't be updated and repositioned. Instead, we collect the
	 * vtime deltas separately and apply them asynchronously here.
	 */
	delta = __sync_fetch_and_sub(&cgc->cvtime_delta, cgc->cvtime_delta);
	cvtime = cgv_node->cvtime + delta;

	/*
	 * Allow a cgroup to carry the maximum budget proportional to its
	 * hweight such that a full-hweight cgroup can immediately take up half
	 * of the CPUs at the most while staying at the front of the rbtree.
	 */
	max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) /
		(2 * FCG_HWEIGHT_ONE);
	if (vtime_before(cvtime, cvtime_now - max_budget))
		cvtime = cvtime_now - max_budget;

	cgv_node->cvtime = cvtime;
}
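
/*
 * Illustrative numbers for the cap above, assuming the defaults in this file:
 * with cgrp_slice_ns == SCX_SLICE_DFL (20ms), nr_cpus == 32 and a full-hweight
 * cgroup, max_budget == 20ms * 32 / 2 == 320ms of vtime, i.e. enough banked
 * budget to occupy at most half of the CPUs for one cgroup slice each.
 */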

static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
{
	struct cgv_node_stash *stash;
	struct cgv_node *cgv_node;
	u64 cgid = cgrp->kn->id;

	/* paired with cmpxchg in try_pick_next_cgroup() */
	if (__sync_val_compare_and_swap(&cgc->queued, 0, 1)) {
		stat_inc(FCG_STAT_ENQ_SKIP);
		return;
	}

	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
	if (!stash) {
		scx_bpf_error("cgv_node lookup failed for cgid %llu", cgid);
		return;
	}

	/* NULL if the node is already on the rbtree */
	cgv_node = bpf_kptr_xchg(&stash->node, NULL);
	if (!cgv_node) {
		stat_inc(FCG_STAT_ENQ_RACE);
		return;
	}

	bpf_spin_lock(&cgv_tree_lock);
	cgrp_cap_budget(cgv_node, cgc);
	bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
	bpf_spin_unlock(&cgv_tree_lock);
}

static void set_bypassed_at(struct task_struct *p, struct fcg_task_ctx *taskc)
{
	/*
	 * Tell fcg_stopping() that this bypassed the regular scheduling path
	 * and should be force charged to the cgroup. 0 is used to indicate that
	 * the task isn't bypassing, so if the current runtime is 0, go back by
	 * one nanosecond.
	 */
	taskc->bypassed_at = p->se.sum_exec_runtime ?: (u64)-1;
}

s32 BPF_STRUCT_OPS(fcg_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags)
{
	struct fcg_task_ctx *taskc;
	bool is_idle = false;
	s32 cpu;

	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);

	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
	if (!taskc) {
		scx_bpf_error("task_ctx lookup failed");
		return cpu;
	}

	/*
	 * If select_cpu_dfl() is recommending local enqueue, the target CPU is
	 * idle. Follow it and charge the cgroup later in fcg_stopping() after
	 * the fact.
	 */
	if (is_idle) {
		set_bypassed_at(p, taskc);
		stat_inc(FCG_STAT_LOCAL);
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	}

	return cpu;
}

void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags)
{
	struct fcg_task_ctx *taskc;
	struct cgroup *cgrp;
	struct fcg_cgrp_ctx *cgc;

	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
	if (!taskc) {
		scx_bpf_error("task_ctx lookup failed");
		return;
	}

	/*
	 * Use the direct dispatching and force charging to deal with tasks with
	 * custom affinities so that we don't have to worry about per-cgroup
	 * dq's containing tasks that can't be executed from some CPUs.
	 */
	if (p->nr_cpus_allowed != nr_cpus) {
		set_bypassed_at(p, taskc);

		/*
		 * The global dq is deprioritized as we don't want to let tasks
		 * boost themselves by constraining their cpumask. The
		 * deprioritization is rather severe, so let's not apply that to
		 * per-cpu kernel threads. This is ham-fisted. We probably wanna
		 * implement per-cgroup fallback dq's instead so that we have
		 * more control over when tasks with custom cpumask get issued.
		 */
		if (p->nr_cpus_allowed == 1 && (p->flags & PF_KTHREAD)) {
			stat_inc(FCG_STAT_LOCAL);
			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
					   enq_flags);
		} else {
			stat_inc(FCG_STAT_GLOBAL);
			scx_bpf_dsq_insert(p, FALLBACK_DSQ, SCX_SLICE_DFL,
					   enq_flags);
		}
		return;
	}

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		goto out_release;

	if (fifo_sched) {
		scx_bpf_dsq_insert(p, cgrp->kn->id, SCX_SLICE_DFL, enq_flags);
	} else {
		u64 tvtime = p->scx.dsq_vtime;

		/*
		 * Limit the amount of budget that an idling task can accumulate
		 * to one slice.
		 */
		if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL))
			tvtime = cgc->tvtime_now - SCX_SLICE_DFL;

		scx_bpf_dsq_insert_vtime(p, cgrp->kn->id, SCX_SLICE_DFL,
					 tvtime, enq_flags);
	}

	cgrp_enqueued(cgrp, cgc);
out_release:
	bpf_cgroup_release(cgrp);
}

/*
 * Walk the cgroup tree to update the active weight sums as tasks wake up and
 * sleep. The weight sums are used as the base when calculating the proportion a
 * given cgroup or task is entitled to at each level.
 */
static void update_active_weight_sums(struct cgroup *cgrp, bool runnable)
{
	struct fcg_cgrp_ctx *cgc;
	bool updated = false;
	int idx;

	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		return;

	/*
	 * In most cases, a hot cgroup would have multiple threads going to
	 * sleep and waking up while the whole cgroup stays active. In leaf
	 * cgroups, ->nr_runnable, which is updated with __sync operations,
	 * gates ->nr_active updates, so that we don't have to grab the
	 * cgv_tree_lock repeatedly for a busy cgroup which is staying active.
	 */
	if (runnable) {
		if (__sync_fetch_and_add(&cgc->nr_runnable, 1))
			return;
		stat_inc(FCG_STAT_ACT);
	} else {
		if (__sync_sub_and_fetch(&cgc->nr_runnable, 1))
			return;
		stat_inc(FCG_STAT_DEACT);
	}

	/*
	 * If @cgrp is becoming runnable, its hweight should be refreshed after
	 * it's added to the weight tree so that enqueue has the up-to-date
	 * value. If @cgrp is becoming quiescent, the hweight should be
	 * refreshed before it's removed from the weight tree so that the usage
	 * charging which happens afterwards has access to the latest value.
	 */
	if (!runnable)
		cgrp_refresh_hweight(cgrp, cgc);

	/* propagate upwards */
	bpf_for(idx, 0, cgrp->level) {
		int level = cgrp->level - idx;
		struct fcg_cgrp_ctx *cgc, *pcgc = NULL;
		bool propagate = false;

		cgc = find_ancestor_cgrp_ctx(cgrp, level);
		if (!cgc)
			break;
		if (level) {
			pcgc = find_ancestor_cgrp_ctx(cgrp, level - 1);
			if (!pcgc)
				break;
		}

		/*
		 * We need the propagation protected by a lock to synchronize
		 * against weight changes. There's no reason to drop the lock at
		 * each level but bpf_spin_lock() doesn't want any function
		 * calls while locked.
		 */
		bpf_spin_lock(&cgv_tree_lock);

		if (runnable) {
			if (!cgc->nr_active++) {
				updated = true;
				if (pcgc) {
					propagate = true;
					pcgc->child_weight_sum += cgc->weight;
				}
			}
		} else {
			if (!--cgc->nr_active) {
				updated = true;
				if (pcgc) {
					propagate = true;
					pcgc->child_weight_sum -= cgc->weight;
				}
			}
		}

		bpf_spin_unlock(&cgv_tree_lock);

		if (!propagate)
			break;
	}

	if (updated)
		__sync_fetch_and_add(&hweight_gen, 1);

	if (runnable)
		cgrp_refresh_hweight(cgrp, cgc);
}

void BPF_STRUCT_OPS(fcg_runnable, struct task_struct *p, u64 enq_flags)
{
	struct cgroup *cgrp;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	update_active_weight_sums(cgrp, true);
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_running, struct task_struct *p)
{
	struct cgroup *cgrp;
	struct fcg_cgrp_ctx *cgc;

	if (fifo_sched)
		return;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	cgc = find_cgrp_ctx(cgrp);
	if (cgc) {
		/*
		 * @cgc->tvtime_now always progresses forward as tasks start
		 * executing. The test and update can be performed concurrently
		 * from multiple CPUs and are thus racy. Any error should be
		 * contained and temporary. Let's just live with it.
		 */
		if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime))
			cgc->tvtime_now = p->scx.dsq_vtime;
	}
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
{
	struct fcg_task_ctx *taskc;
	struct cgroup *cgrp;
	struct fcg_cgrp_ctx *cgc;

	/*
	 * Scale the execution time by the inverse of the weight and charge.
	 *
	 * Note that the default yield implementation yields by setting
	 * @p->scx.slice to zero and the following would treat the yielding task
	 * as if it has consumed all its slice. If this penalizes yielding tasks
	 * too much, determine the execution time by taking explicit timestamps
	 * instead of depending on @p->scx.slice.
	 */
	if (!fifo_sched)
		p->scx.dsq_vtime +=
			(SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
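	/*
	 * Worked example, illustrative only: a task that consumed 10ms of its
	 * slice is charged 10ms * 100 / p->scx.weight of vtime, i.e. 5ms at
	 * weight 200 and 20ms at weight 50, so higher-weight tasks accumulate
	 * vtime more slowly and get picked proportionally more often.
	 */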

	taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
	if (!taskc) {
		scx_bpf_error("task_ctx lookup failed");
		return;
	}

	if (!taskc->bypassed_at)
		return;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	cgc = find_cgrp_ctx(cgrp);
	if (cgc) {
		__sync_fetch_and_add(&cgc->cvtime_delta,
				     p->se.sum_exec_runtime - taskc->bypassed_at);
		taskc->bypassed_at = 0;
	}
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_quiescent, struct task_struct *p, u64 deq_flags)
{
	struct cgroup *cgrp;

	cgrp = __COMPAT_scx_bpf_task_cgroup(p);
	update_active_weight_sums(cgrp, false);
	bpf_cgroup_release(cgrp);
}

void BPF_STRUCT_OPS(fcg_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
{
	struct fcg_cgrp_ctx *cgc, *pcgc = NULL;

	cgc = find_cgrp_ctx(cgrp);
	if (!cgc)
		return;

	if (cgrp->level) {
		pcgc = find_ancestor_cgrp_ctx(cgrp, cgrp->level - 1);
		if (!pcgc)
			return;
	}

	bpf_spin_lock(&cgv_tree_lock);
	if (pcgc && cgc->nr_active)
		pcgc->child_weight_sum += (s64)weight - cgc->weight;
	cgc->weight = weight;
	bpf_spin_unlock(&cgv_tree_lock);
}

static bool try_pick_next_cgroup(u64 *cgidp)
{
	struct bpf_rb_node *rb_node;
	struct cgv_node_stash *stash;
	struct cgv_node *cgv_node;
	struct fcg_cgrp_ctx *cgc;
	struct cgroup *cgrp;
	u64 cgid;

	/* pop the front cgroup and wind cvtime_now accordingly */
	bpf_spin_lock(&cgv_tree_lock);

	rb_node = bpf_rbtree_first(&cgv_tree);
	if (!rb_node) {
		bpf_spin_unlock(&cgv_tree_lock);
		stat_inc(FCG_STAT_PNC_NO_CGRP);
		*cgidp = 0;
		return true;
	}

	rb_node = bpf_rbtree_remove(&cgv_tree, rb_node);
	bpf_spin_unlock(&cgv_tree_lock);

	if (!rb_node) {
		/*
		 * This should never happen. bpf_rbtree_first() was called
		 * above while the tree lock was held, so the node should
		 * always be present.
		 */
		scx_bpf_error("node could not be removed");
		return true;
	}

	cgv_node = container_of(rb_node, struct cgv_node, rb_node);
	cgid = cgv_node->cgid;

	if (vtime_before(cvtime_now, cgv_node->cvtime))
		cvtime_now = cgv_node->cvtime;

	/*
	 * If lookup fails, the cgroup's gone. Free and move on. See
	 * fcg_cgroup_exit().
	 */
	cgrp = bpf_cgroup_from_id(cgid);
	if (!cgrp) {
		stat_inc(FCG_STAT_PNC_GONE);
		goto out_free;
	}

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
	if (!cgc) {
		bpf_cgroup_release(cgrp);
		stat_inc(FCG_STAT_PNC_GONE);
		goto out_free;
	}

	if (!scx_bpf_dsq_move_to_local(cgid)) {
		bpf_cgroup_release(cgrp);
		stat_inc(FCG_STAT_PNC_EMPTY);
		goto out_stash;
	}

	/*
	 * Successfully consumed from the cgroup. This will be our current
	 * cgroup for the new slice. Refresh its hweight.
	 */
	cgrp_refresh_hweight(cgrp, cgc);

	bpf_cgroup_release(cgrp);

	/*
	 * As the cgroup may have more tasks, add it back to the rbtree. Note
	 * that here we charge the full slice upfront and then exact later
	 * according to the actual consumption. This prevents a lowpri
	 * thundering herd from saturating the machine.
	 */
	bpf_spin_lock(&cgv_tree_lock);
	cgv_node->cvtime += cgrp_slice_ns * FCG_HWEIGHT_ONE / (cgc->hweight ?: 1);
	cgrp_cap_budget(cgv_node, cgc);
	bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
	bpf_spin_unlock(&cgv_tree_lock);

	*cgidp = cgid;
	stat_inc(FCG_STAT_PNC_NEXT);
	return true;

out_stash:
	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
	if (!stash) {
		stat_inc(FCG_STAT_PNC_GONE);
		goto out_free;
	}

	/*
	 * Paired with cmpxchg in cgrp_enqueued(). If they see the following
	 * transition, they'll enqueue the cgroup. If they are earlier, we'll
	 * see their task in the dq below and requeue the cgroup.
	 */
	__sync_val_compare_and_swap(&cgc->queued, 1, 0);

	if (scx_bpf_dsq_nr_queued(cgid)) {
		bpf_spin_lock(&cgv_tree_lock);
		bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
		bpf_spin_unlock(&cgv_tree_lock);
		stat_inc(FCG_STAT_PNC_RACE);
	} else {
		cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
		if (cgv_node) {
			scx_bpf_error("unexpected !NULL cgv_node stash");
			goto out_free;
		}
	}

	return false;

out_free:
	bpf_obj_drop(cgv_node);
	return false;
}
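
/*
 * Illustrative numbers for the upfront charge in try_pick_next_cgroup() above,
 * assuming the default 20ms cgrp_slice_ns: a cgroup whose hweight is
 * FCG_HWEIGHT_ONE / 6 (like B in the example hierarchy at the top) has its
 * cvtime advanced by 20ms * 6 == 120ms per picked slice, while a full-hweight
 * cgroup advances by only 20ms, so lower-weight cgroups are picked
 * correspondingly less often.
 */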

void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
{
	struct fcg_cpu_ctx *cpuc;
	struct fcg_cgrp_ctx *cgc;
	struct cgroup *cgrp;
	u64 now = bpf_ktime_get_ns();
	bool picked_next = false;

	cpuc = find_cpu_ctx();
	if (!cpuc)
		return;

	if (!cpuc->cur_cgid)
		goto pick_next_cgroup;

	if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {
		if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) {
			stat_inc(FCG_STAT_CNS_KEEP);
			return;
		}
		stat_inc(FCG_STAT_CNS_EMPTY);
	} else {
		stat_inc(FCG_STAT_CNS_EXPIRE);
	}

	/*
	 * The current cgroup is expiring. It was already charged a full slice.
	 * Calculate the actual usage and accumulate the delta.
	 */
	cgrp = bpf_cgroup_from_id(cpuc->cur_cgid);
	if (!cgrp) {
		stat_inc(FCG_STAT_CNS_GONE);
		goto pick_next_cgroup;
	}

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0, 0);
	if (cgc) {
		/*
		 * We want to update the vtime delta and then look for the next
		 * cgroup to execute but the latter needs to be done in a loop
		 * and we can't keep the lock held. Oh well...
		 */
		bpf_spin_lock(&cgv_tree_lock);
		__sync_fetch_and_add(&cgc->cvtime_delta,
				     (cpuc->cur_at + cgrp_slice_ns - now) *
				     FCG_HWEIGHT_ONE / (cgc->hweight ?: 1));
		bpf_spin_unlock(&cgv_tree_lock);
	} else {
		stat_inc(FCG_STAT_CNS_GONE);
	}

	bpf_cgroup_release(cgrp);

pick_next_cgroup:
	cpuc->cur_at = now;

	if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ)) {
		cpuc->cur_cgid = 0;
		return;
	}

	bpf_repeat(CGROUP_MAX_RETRIES) {
		if (try_pick_next_cgroup(&cpuc->cur_cgid)) {
			picked_next = true;
			break;
		}
	}

	/*
	 * This only happens if try_pick_next_cgroup() races against the
	 * enqueue path for more than CGROUP_MAX_RETRIES times, which is
	 * extremely unlikely and likely indicates an underlying bug. There
	 * shouldn't be any stall risk as the race is against enqueue.
	 */
	if (!picked_next)
		stat_inc(FCG_STAT_PNC_FAIL);
}

s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p,
		   struct scx_init_task_args *args)
{
	struct fcg_task_ctx *taskc;
	struct fcg_cgrp_ctx *cgc;

	/*
	 * @p is new. Let's ensure that its task_ctx is available. We can sleep
	 * in this function and the following will automatically use GFP_KERNEL.
	 */
	taskc = bpf_task_storage_get(&task_ctx, p, 0,
				     BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!taskc)
		return -ENOMEM;

	taskc->bypassed_at = 0;

	if (!(cgc = find_cgrp_ctx(args->cgroup)))
		return -ENOENT;

	p->scx.dsq_vtime = cgc->tvtime_now;

	return 0;
}

int BPF_STRUCT_OPS_SLEEPABLE(fcg_cgroup_init, struct cgroup *cgrp,
			     struct scx_cgroup_init_args *args)
{
	struct fcg_cgrp_ctx *cgc;
	struct cgv_node *cgv_node;
	struct cgv_node_stash empty_stash = {}, *stash;
	u64 cgid = cgrp->kn->id;
	int ret;

	/*
	 * Technically incorrect as cgroup ID is full 64bit while dsq ID is
	 * 63bit. Should not be a problem in practice and easy to spot in the
	 * unlikely case that it breaks.
	 */
	ret = scx_bpf_create_dsq(cgid, -1);
	if (ret)
		return ret;

	cgc = bpf_cgrp_storage_get(&cgrp_ctx, cgrp, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!cgc) {
		ret = -ENOMEM;
		goto err_destroy_dsq;
	}

	cgc->weight = args->weight;
	cgc->hweight = FCG_HWEIGHT_ONE;

	ret = bpf_map_update_elem(&cgv_node_stash, &cgid, &empty_stash,
				  BPF_NOEXIST);
	if (ret) {
		scx_bpf_error("unexpected stash creation error (%d)",
			      ret);
		goto err_destroy_dsq;
	}

	stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
	if (!stash) {
		scx_bpf_error("unexpected cgv_node stash lookup failure");
		ret = -ENOENT;
		goto err_destroy_dsq;
	}

	cgv_node = bpf_obj_new(struct cgv_node);
	if (!cgv_node) {
		ret = -ENOMEM;
		goto err_del_cgv_node;
	}

	cgv_node->cgid = cgid;
	cgv_node->cvtime = cvtime_now;

	cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
	if (cgv_node) {
		scx_bpf_error("unexpected !NULL cgv_node stash");
		ret = -EBUSY;
		goto err_drop;
	}

	return 0;

err_drop:
	bpf_obj_drop(cgv_node);
err_del_cgv_node:
	bpf_map_delete_elem(&cgv_node_stash, &cgid);
err_destroy_dsq:
	scx_bpf_destroy_dsq(cgid);
	return ret;
}

void BPF_STRUCT_OPS(fcg_cgroup_exit, struct cgroup *cgrp)
{
	u64 cgid = cgrp->kn->id;

	/*
	 * For now, there's no way to find and remove the cgv_node if it's on
	 * the cgv_tree. Let's drain them in the dispatch path as they get
	 * popped off the front of the tree.
	 */
	bpf_map_delete_elem(&cgv_node_stash, &cgid);
	scx_bpf_destroy_dsq(cgid);
}

void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
		    struct cgroup *from, struct cgroup *to)
{
	struct fcg_cgrp_ctx *from_cgc, *to_cgc;
	s64 vtime_delta;

	/* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */
	if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to)))
		return;

	vtime_delta = p->scx.dsq_vtime - from_cgc->tvtime_now;
	p->scx.dsq_vtime = to_cgc->tvtime_now + vtime_delta;
}

s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
{
	return scx_bpf_create_dsq(FALLBACK_DSQ, -1);
}

void BPF_STRUCT_OPS(fcg_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SCX_OPS_DEFINE(flatcg_ops,
	       .select_cpu		= (void *)fcg_select_cpu,
	       .enqueue			= (void *)fcg_enqueue,
	       .dispatch		= (void *)fcg_dispatch,
	       .runnable		= (void *)fcg_runnable,
	       .running			= (void *)fcg_running,
	       .stopping		= (void *)fcg_stopping,
	       .quiescent		= (void *)fcg_quiescent,
	       .init_task		= (void *)fcg_init_task,
	       .cgroup_set_weight	= (void *)fcg_cgroup_set_weight,
	       .cgroup_init		= (void *)fcg_cgroup_init,
	       .cgroup_exit		= (void *)fcg_cgroup_exit,
	       .cgroup_move		= (void *)fcg_cgroup_move,
	       .init			= (void *)fcg_init,
	       .exit			= (void *)fcg_exit,
	       .flags			= SCX_OPS_HAS_CGROUP_WEIGHT | SCX_OPS_ENQ_EXITING,
	       .name			= "flatcg");