/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A central FIFO sched_ext scheduler which demonstrates the following:
 *
 * a. Making all scheduling decisions from one CPU:
 *
 *    The central CPU is the only one making scheduling decisions. All other
 *    CPUs kick the central CPU when they run out of tasks to run.
 *
 *    There is one global BPF queue and the central CPU schedules all CPUs by
 *    dispatching from the global queue to each CPU's local dsq from dispatch().
 *    This isn't the most straightforward approach; e.g. it'd be easier to
 *    bounce through per-CPU BPF queues. The current design is chosen to
 *    maximally utilize and verify various SCX mechanisms such as LOCAL_ON
 *    dispatching.
 *
 * b. Tickless operation
 *
 *    All tasks are dispatched with the infinite slice which allows stopping the
 *    ticks on CONFIG_NO_HZ_FULL kernels running with the proper nohz_full
 *    parameter. The tickless operation can be observed through
 *    /proc/timer_list.
 *
 *    Periodic switching is enforced by a periodic timer checking all CPUs and
 *    preempting them as necessary. The timer is pinned to the central CPU when
 *    the kernel supports BPF_F_TIMER_CPU_PIN (see central_init()); on older
 *    kernels it may run on any CPU.
 *
 * c. Preemption
 *
 *    Kthreads are unconditionally queued to the head of a matching local dsq
 *    and dispatched with SCX_ENQ_PREEMPT. This ensures that a kthread is always
 *    prioritized over user threads, which is required for ensuring forward
 *    progress as e.g. the periodic timer may run on a ksoftirqd and if the
 *    ksoftirqd gets starved by a user thread, there may be nothing else that
 *    can vacate that user thread.
 *
 *    SCX_KICK_PREEMPT is used to trigger scheduling and CPUs to move to the
 *    next tasks.
 *
 * This scheduler is designed to maximize usage of various SCX mechanisms. A
 * more practical implementation would likely put the scheduling loop outside
 * the central CPU's dispatch() path and add some form of priority mechanism.
 *
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

enum {
	FALLBACK_DSQ_ID		= 0,
	MS_TO_NS		= 1000LLU * 1000,
	TIMER_INTERVAL_NS	= 1 * MS_TO_NS,
};

const volatile s32 central_cpu;
const volatile u32 nr_cpu_ids = 1;	/* !0 for veristat, set during init */
const volatile u64 slice_ns = SCX_SLICE_DFL;

bool timer_pinned = true;
u64 nr_total, nr_locals, nr_queued, nr_lost_pids;
u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries;
u64 nr_overflows;

UEI_DEFINE(uei);
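
/*
 * The single global FIFO of runnable pids. central_enqueue() pushes pids here
 * and the central CPU pops them in dispatch_to_cpu() to feed each CPU's local
 * DSQ.
 */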
struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 4096);
	__type(value, s32);
} central_q SEC(".maps");

/* can't use percpu map due to bad lookups */
bool RESIZABLE_ARRAY(data, cpu_gimme_task);
u64 RESIZABLE_ARRAY(data, cpu_started_at);

struct central_timer {
	struct bpf_timer timer;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct central_timer);
} central_timer SEC(".maps");
static bool vtime_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	/*
	 * Steer wakeups to the central CPU as much as possible to avoid
	 * disturbing other CPUs. It's safe to blindly return the central cpu as
	 * select_cpu() is a hint and if @p can't be on it, the kernel will
	 * automatically pick a fallback CPU.
	 */
	return central_cpu;
}
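
/*
 * Queue incoming tasks. Per-CPU kthreads are dispatched directly to their
 * local DSQ; everything else is pushed onto the global central_q for the
 * central CPU to distribute.
 */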
void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags)
{
	s32 pid = p->pid;

	__sync_fetch_and_add(&nr_total, 1);

	/*
	 * Push per-cpu kthreads at the head of local dsq's and preempt the
	 * corresponding CPU. This ensures that e.g. ksoftirqd isn't blocked
	 * behind other threads which is necessary for forward progress
	 * guarantee as we depend on the BPF timer which may run from ksoftirqd.
	 */
	if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) {
		__sync_fetch_and_add(&nr_locals, 1);
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_INF,
				   enq_flags | SCX_ENQ_PREEMPT);
		return;
	}

	if (bpf_map_push_elem(&central_q, &pid, 0)) {
		__sync_fetch_and_add(&nr_overflows, 1);
		scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, enq_flags);
		return;
	}

	__sync_fetch_and_add(&nr_queued, 1);

	if (!scx_bpf_task_running(p))
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
}
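
/*
 * Pop pids from central_q and dispatch them. Returns true once a task has been
 * dispatched to @cpu's local DSQ, false if the queue was drained or dispatch
 * buffer slots ran out before that happened.
 */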
static bool dispatch_to_cpu(s32 cpu)
{
	struct task_struct *p;
	s32 pid;

	bpf_repeat(BPF_MAX_LOOPS) {
		if (bpf_map_pop_elem(&central_q, &pid))
			break;

		__sync_fetch_and_sub(&nr_queued, 1);

		p = bpf_task_from_pid(pid);
		if (!p) {
			__sync_fetch_and_add(&nr_lost_pids, 1);
			continue;
		}

		/*
		 * If we can't run the task at the top, do the dumb thing and
		 * bounce it to the fallback dsq.
		 */
		if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) {
			__sync_fetch_and_add(&nr_mismatches, 1);
			scx_bpf_dsq_insert(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, 0);
			bpf_task_release(p);
			/*
			 * We might run out of dispatch buffer slots if we keep
			 * dispatching to the fallback DSQ without dispatching
			 * to the local DSQ of the target CPU. In such a case,
			 * break the loop now as the next dispatch operation
			 * would fail.
			 */
			if (!scx_bpf_dispatch_nr_slots())
				break;
			continue;
		}

		/* dispatch to local and mark that @cpu doesn't need more */
		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0);

		if (cpu != central_cpu)
			scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);

		bpf_task_release(p);
		return true;
	}

	return false;
}
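
/*
 * On the central CPU, walk all CPUs that have asked for a task
 * (cpu_gimme_task) and feed them from central_q, then find something for the
 * central CPU itself. On any other CPU, pull from the fallback DSQ if
 * possible; otherwise set cpu_gimme_task and kick the central CPU to dispatch
 * on our behalf.
 */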
void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev)
{
	if (cpu == central_cpu) {
		/* dispatch for all other CPUs first */
		__sync_fetch_and_add(&nr_dispatches, 1);

		bpf_for(cpu, 0, nr_cpu_ids) {
			bool *gimme;

			if (!scx_bpf_dispatch_nr_slots())
				break;

			/* central's gimme is never set */
			gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
			if (!gimme || !*gimme)
				continue;

			if (dispatch_to_cpu(cpu))
				*gimme = false;
		}

		/*
		 * Retry if we ran out of dispatch buffer slots as we might have
		 * skipped some CPUs and also need to dispatch for self. The ext
		 * core automatically retries if the local dsq is empty but we
		 * can't rely on that as we're dispatching for other CPUs too.
		 * Kick self explicitly to retry.
		 */
		if (!scx_bpf_dispatch_nr_slots()) {
			__sync_fetch_and_add(&nr_retries, 1);
			scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
			return;
		}

		/* look for a task to run on the central CPU */
		if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
			return;
		dispatch_to_cpu(central_cpu);
	} else {
		bool *gimme;

		if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
			return;

		gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
		if (gimme)
			*gimme = true;

		/*
		 * Force dispatch on the scheduling CPU so that it finds a task
		 * to run for us.
		 */
		scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT);
	}
}
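
/*
 * running/stopping record when each CPU started executing its current task in
 * cpu_started_at so that the periodic timer can tell whether the slice has
 * been exhausted.
 */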
void BPF_STRUCT_OPS(central_running, struct task_struct *p)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);

	if (started_at)
		*started_at = bpf_ktime_get_ns() ?: 1;	/* 0 indicates idle */
}

void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable)
{
	s32 cpu = scx_bpf_task_cpu(p);
	u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);

	if (started_at)
		*started_at = 0;
}
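
/*
 * Periodic timer callback. Scans all non-central CPUs, preempts any CPU whose
 * current task has run past slice_ns while work is pending (or while queued
 * tasks remain), and re-arms itself.
 */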
static int central_timerfn(void *map, int *key, struct bpf_timer *timer)
{
	u64 now = bpf_ktime_get_ns();
	u64 nr_to_kick = nr_queued;
	s32 i, curr_cpu;

	curr_cpu = bpf_get_smp_processor_id();
	if (timer_pinned && (curr_cpu != central_cpu)) {
		scx_bpf_error("Central timer ran on CPU %d, not central CPU %d",
			      curr_cpu, central_cpu);
		return 0;
	}

	bpf_for(i, 0, nr_cpu_ids) {
		s32 cpu = (nr_timers + i) % nr_cpu_ids;
		u64 *started_at;

		if (cpu == central_cpu)
			continue;

		/* kick iff the current one exhausted its slice */
		started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids);
		if (started_at && *started_at &&
		    vtime_before(now, *started_at + slice_ns))
			continue;

		/* and there's something pending */
		if (scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID) ||
		    scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu))
			;
		else if (nr_to_kick)
			nr_to_kick--;
		else
			continue;

		scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
	}

	bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
	__sync_fetch_and_add(&nr_timers, 1);
	return 0;
}
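
/*
 * Create the fallback DSQ and arm the periodic preemption timer, pinning it to
 * the central CPU when the kernel supports BPF_F_TIMER_CPU_PIN.
 */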
int BPF_STRUCT_OPS_SLEEPABLE(central_init)
{
	u32 key = 0;
	struct bpf_timer *timer;
	int ret;

	ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1);
	if (ret)
		return ret;

	timer = bpf_map_lookup_elem(&central_timer, &key);
	if (!timer)
		return -ESRCH;

	if (bpf_get_smp_processor_id() != central_cpu) {
		scx_bpf_error("init from non-central CPU");
		return -EINVAL;
	}

	bpf_timer_init(timer, &central_timer, CLOCK_MONOTONIC);
	bpf_timer_set_callback(timer, central_timerfn);

	ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, BPF_F_TIMER_CPU_PIN);
	/*
	 * BPF_F_TIMER_CPU_PIN is pretty new (>=6.7). If we're running in a
	 * kernel which doesn't have it, bpf_timer_start() will return -EINVAL.
	 * Retry without the PIN. This would be the perfect use case for
	 * bpf_core_enum_value_exists() but the enum type doesn't have a name
	 * and can't be used with bpf_core_enum_value_exists(). Oh well...
	 */
	if (ret == -EINVAL) {
		timer_pinned = false;
		ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0);
	}
	if (ret)
		scx_bpf_error("bpf_timer_start failed (%d)", ret);
	return ret;
}

void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei)
{
	UEI_RECORD(uei, ei);
}

SCX_OPS_DEFINE(central_ops,
	       /*
		* We are offloading all scheduling decisions to the central CPU
		* and thus being the last task on a given CPU doesn't mean
		* anything special. Enqueue the last tasks like any other tasks.
		*/
	       .flags			= SCX_OPS_ENQ_LAST,

	       .select_cpu		= (void *)central_select_cpu,
	       .enqueue			= (void *)central_enqueue,
	       .dispatch		= (void *)central_dispatch,
	       .running			= (void *)central_running,
	       .stopping		= (void *)central_stopping,
	       .init			= (void *)central_init,
	       .exit			= (void *)central_exit,
	       .name			= "central");