/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
14 #include <scx/common.h>
15 #include "scx_qmap.bpf.skel.h"
/*
 * Usage text printed to stderr on -h or any unrecognized option.  Contains
 * one %s conversion which is filled with basename(argv[0]).
 */
const char help_fmt[] =
"A simple five-level FIFO queue sched_ext scheduler.\n"
"\n"
"See the top-level comment in .bpf.c for more details.\n"
"\n"
"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-l COUNT] [-b COUNT]\n"
"       [-P] [-d PID] [-D LEN] [-p] [-v]\n"
"\n"
"  -s SLICE_US   Override slice duration\n"
"  -e COUNT      Trigger scx_bpf_error() after COUNT enqueues\n"
"  -t COUNT      Stall every COUNT'th user thread\n"
"  -T COUNT      Stall every COUNT'th kernel thread\n"
"  -l COUNT      Trigger dispatch infinite looping after COUNT dispatches\n"
"  -b COUNT      Dispatch upto COUNT tasks together\n"
"  -P            Print out DSQ content to trace_pipe every second, use with -b\n"
"  -H            Boost nice -20 tasks in SHARED_DSQ, use with -b\n"
"  -d PID        Disallow a process from switching into SCHED_EXT (-1 for self)\n"
"  -D LEN        Set scx_exit_info.dump buffer length\n"
"  -S            Suppress qmap-specific debug dump\n"
"  -p            Switch only tasks on SCHED_EXT policy instead of all\n"
"  -v            Print libbpf debug messages\n"
"  -h            Display this help and exit\n";
/* Set to true by -v; makes libbpf_print_fn() pass through debug messages. */
static bool verbose;
/* Set from the SIGINT/SIGTERM handler; tells the main loop to shut down.
 * volatile because it is written asynchronously from signal context. */
static volatile int exit_req;
43 static int libbpf_print_fn(enum libbpf_print_level level
, const char *format
, va_list args
)
45 if (level
== LIBBPF_DEBUG
&& !verbose
)
47 return vfprintf(stderr
, format
, args
);
50 static void sigint_handler(int dummy
)
55 int main(int argc
, char **argv
)
57 struct scx_qmap
*skel
;
58 struct bpf_link
*link
;
61 libbpf_set_print(libbpf_print_fn
);
62 signal(SIGINT
, sigint_handler
);
63 signal(SIGTERM
, sigint_handler
);
65 skel
= SCX_OPS_OPEN(qmap_ops
, scx_qmap
);
67 while ((opt
= getopt(argc
, argv
, "s:e:t:T:l:b:PHd:D:Spvh")) != -1) {
70 skel
->rodata
->slice_ns
= strtoull(optarg
, NULL
, 0) * 1000;
73 skel
->bss
->test_error_cnt
= strtoul(optarg
, NULL
, 0);
76 skel
->rodata
->stall_user_nth
= strtoul(optarg
, NULL
, 0);
79 skel
->rodata
->stall_kernel_nth
= strtoul(optarg
, NULL
, 0);
82 skel
->rodata
->dsp_inf_loop_after
= strtoul(optarg
, NULL
, 0);
85 skel
->rodata
->dsp_batch
= strtoul(optarg
, NULL
, 0);
88 skel
->rodata
->print_shared_dsq
= true;
91 skel
->rodata
->highpri_boosting
= true;
94 skel
->rodata
->disallow_tgid
= strtol(optarg
, NULL
, 0);
95 if (skel
->rodata
->disallow_tgid
< 0)
96 skel
->rodata
->disallow_tgid
= getpid();
99 skel
->struct_ops
.qmap_ops
->exit_dump_len
= strtoul(optarg
, NULL
, 0);
102 skel
->rodata
->suppress_dump
= true;
105 skel
->struct_ops
.qmap_ops
->flags
|= SCX_OPS_SWITCH_PARTIAL
;
111 fprintf(stderr
, help_fmt
, basename(argv
[0]));
116 SCX_OPS_LOAD(skel
, qmap_ops
, scx_qmap
, uei
);
117 link
= SCX_OPS_ATTACH(skel
, qmap_ops
, scx_qmap
);
119 while (!exit_req
&& !UEI_EXITED(skel
, uei
)) {
120 long nr_enqueued
= skel
->bss
->nr_enqueued
;
121 long nr_dispatched
= skel
->bss
->nr_dispatched
;
123 printf("stats : enq=%lu dsp=%lu delta=%ld reenq=%"PRIu64
" deq=%"PRIu64
" core=%"PRIu64
" enq_ddsp=%"PRIu64
"\n",
124 nr_enqueued
, nr_dispatched
, nr_enqueued
- nr_dispatched
,
125 skel
->bss
->nr_reenqueued
, skel
->bss
->nr_dequeued
,
126 skel
->bss
->nr_core_sched_execed
,
127 skel
->bss
->nr_ddsp_from_enq
);
128 printf(" exp_local=%"PRIu64
" exp_remote=%"PRIu64
" exp_timer=%"PRIu64
" exp_lost=%"PRIu64
"\n",
129 skel
->bss
->nr_expedited_local
,
130 skel
->bss
->nr_expedited_remote
,
131 skel
->bss
->nr_expedited_from_timer
,
132 skel
->bss
->nr_expedited_lost
);
133 if (__COMPAT_has_ksym("scx_bpf_cpuperf_cur"))
134 printf("cpuperf: cur min/avg/max=%u/%u/%u target min/avg/max=%u/%u/%u\n",
135 skel
->bss
->cpuperf_min
,
136 skel
->bss
->cpuperf_avg
,
137 skel
->bss
->cpuperf_max
,
138 skel
->bss
->cpuperf_target_min
,
139 skel
->bss
->cpuperf_target_avg
,
140 skel
->bss
->cpuperf_target_max
);
145 bpf_link__destroy(link
);
146 UEI_REPORT(skel
, uei
);
147 scx_qmap__destroy(skel
);
149 * scx_qmap implements ops.cpu_on/offline() and doesn't need to restart
150 * on CPU hotplug events.