2 * Copyright (C) 2006-2013 Gilles Chanteperdrix <gch@xenomai.org>
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice shall be included
13 * in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
36 #include <semaphore.h>
41 #include <xeno_config.h>
42 #include <asm/xenomai/fptest.h>
43 #include <asm-generic/stack.h>
44 #include <nucleus/trace.h>
45 #include <rtdm/rttesting.h>
47 #ifdef HAVE_RECENT_SETAFFINITY
48 #define do_sched_setaffinity(pid,len,mask) sched_setaffinity(pid,len,mask)
49 #else /* !HAVE_RECENT_SETAFFINITY */
50 #ifdef HAVE_OLD_SETAFFINITY
51 #define do_sched_setaffinity(pid,len,mask) sched_setaffinity(pid,mask)
52 #else /* !HAVE_OLD_SETAFFINITY */
53 #ifndef __cpu_set_t_defined
54 typedef unsigned long cpu_set_t
;
56 #define do_sched_setaffinity(pid,len,mask) 0
58 #define CPU_ZERO(set) do { *(set) = 0; } while(0)
59 #define CPU_SET(n,set) do { *(set) |= (1 << n); } while(0)
61 #endif /* HAVE_OLD_SETAFFINITY */
62 #endif /* HAVE_RECENT_SETAFFINITY */
65 #define smp_sched_setaffinity(pid,len,mask) do_sched_setaffinity(pid,len,mask)
66 #else /* !CONFIG_SMP */
67 #define smp_sched_setaffinity(pid,len,mask) 0
68 #endif /* !CONFIG_SMP */
70 #define SMALL_STACK_MIN xeno_stacksize(32 * 1024)
71 #define LARGE_STACK_MIN xeno_stacksize(64 * 1024)
76 RTK
= 1, /* kernel-space thread. */
77 RTUP
= 2, /* user-space real-time thread in primary mode. */
78 RTUS
= 3, /* user-space real-time thread in secondary mode. */
79 RTUO
= 4, /* user-space real-time thread oscillating
80 between primary and secondary mode. */
86 AFP
= 1, /* arm the FPU task bit (only make sense for RTK) */
87 UFPP
= 2, /* use the FPU while in primary mode. */
88 UFPS
= 4 /* use the FPU while in secondary mode. */
97 struct cpu_tasks
*cpu
;
98 struct rttst_swtest_task swt
;
103 struct task_params
*tasks
;
104 unsigned tasks_count
;
107 unsigned long last_switches_count
;
110 static sem_t sleeper_start
;
111 static int quiet
, status
;
112 static struct timespec start
;
113 static pthread_mutex_t headers_lock
;
114 static unsigned long data_lines
= 21;
115 static unsigned freeze_on_error
;
/*
 * Abort the whole test program from any task: raise SIGTERM at the
 * process (the main thread sigwait()s on a mask containing SIGTERM and
 * performs the teardown), then park the calling thread on the
 * never-posted-again sleeper_start semaphore until it is cancelled.
 *
 * NOTE(review): the extraction appears to have dropped lines from this
 * function (e.g. where @retval is recorded into the global status and
 * the loop around the wait) — verify against the original file.
 */
117 static inline void clean_exit(int retval
)
120 kill(getpid(), SIGTERM
);
122 /* Wait for cancellation. */
123 __real_sem_wait(&sleeper_start
);
/*
 * Compute *result = *lhs - *rhs on normalized timespecs.
 *
 * Callers always pass lhs >= rhs (a current timestamp minus an earlier
 * one), so the result is non-negative. When the nanosecond field would
 * underflow, borrow one second so that result->tv_nsec stays within
 * [0, 1000000000).
 */
static void timespec_substract(struct timespec *result,
			       const struct timespec *lhs,
			       const struct timespec *rhs)
{
	result->tv_sec = lhs->tv_sec - rhs->tv_sec;
	if (lhs->tv_nsec >= rhs->tv_nsec)
		result->tv_nsec = lhs->tv_nsec - rhs->tv_nsec;
	else {
		/* Borrow 1s from the seconds field. */
		result->tv_sec -= 1;
		result->tv_nsec = lhs->tv_nsec + (1000000000 - rhs->tv_nsec);
	}
}
139 static char *task_name(char *buf
, size_t sz
,
140 struct cpu_tasks
*cpu
, unsigned task
)
142 char *basename
[] = {
143 [SLEEPER
] = "sleeper",
148 [SWITCHER
] = "switcher",
149 [FPU_STRESS
] = "fpu_stress",
155 { .flag
= AFP
, .name
= "fp" },
156 { .flag
= UFPP
, .name
= "ufpp" },
157 { .flag
= UFPS
, .name
= "ufps" },
159 struct task_params
*param
;
162 if (task
> cpu
->tasks_count
)
165 if (task
== cpu
->tasks_count
)
166 param
= &cpu
->tasks
[task
];
168 for (param
= &cpu
->tasks
[0]; param
->swt
.index
!= task
; param
++)
171 pos
= snprintf(buf
, sz
, "%s", basename
[param
->type
]);
172 for (i
= 0; i
< sizeof(flags
) / sizeof(flags
[0]); i
++) {
173 if (!(param
->fp
& flags
[i
].flag
))
176 pos
+= snprintf(&buf
[pos
],
177 sz
- pos
, "_%s", flags
[i
].name
);
181 pos
+= snprintf(&buf
[pos
], sz
- pos
, "%u", cpu
->index
);
182 #endif /* !CONFIG_SMP */
184 snprintf(&buf
[pos
], sz
- pos
, "-%u", param
->swt
.index
);
189 static void handle_bad_fpreg(struct cpu_tasks
*cpu
, unsigned fp_val
)
191 struct rttst_swtest_error err
;
196 xntrace_user_freeze(0, 0);
198 ioctl(cpu
->fd
, RTTST_RTIOC_SWTEST_GET_LAST_ERROR
, &err
);
203 from
= err
.last_switch
.from
;
204 to
= err
.last_switch
.to
;
206 fprintf(stderr
, "Error after context switch from task %d(%s) ",
207 from
, task_name(buffer
, sizeof(buffer
), cpu
, from
));
208 fprintf(stderr
, "to task %d(%s),\nFPU registers were set to %u ",
209 to
, task_name(buffer
, sizeof(buffer
), cpu
, to
), fp_val
);
212 fprintf(stderr
, "(maybe task %s)\n",
213 task_name(buffer
, sizeof(buffer
), cpu
, fp_val
/ 1000));
216 if (fp_val
> cpu
->tasks_count
)
217 fprintf(stderr
, "(unidentified task)\n");
219 fprintf(stderr
, "(maybe task %s, having used fpu in "
221 task_name(buffer
, sizeof(buffer
), cpu
, fp_val
/ 1000));
224 clean_exit(EXIT_FAILURE
);
/*
 * pthread cleanup handler installed around the stats-display critical
 * section: release the mutex passed as @cookie so a cancellation while
 * printing cannot leave headers_lock held.
 */
void display_cleanup(void *cookie)
{
	pthread_mutex_t *lock = cookie;

	__real_pthread_mutex_unlock(lock);
}
233 void display_switches_count(struct cpu_tasks
*cpu
, struct timespec
*now
)
235 unsigned long switches_count
;
236 static unsigned nlines
= 0;
239 RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT
,&switches_count
)) {
240 perror("sleeper: ioctl(RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT)");
241 clean_exit(EXIT_FAILURE
);
244 if (switches_count
&&
245 switches_count
== cpu
->last_switches_count
) {
246 fprintf(stderr
, "No context switches during one second, "
248 clean_exit(EXIT_FAILURE
);
254 pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED
, NULL
);
255 pthread_cleanup_push(display_cleanup
, &headers_lock
);
256 __real_pthread_mutex_lock(&headers_lock
);
258 if (data_lines
&& (nlines
++ % data_lines
) == 0) {
259 struct timespec diff
;
262 timespec_substract(&diff
, now
, &start
);
265 printf("RTT| %.2ld:%.2ld:%.2ld\n",
266 dt
/ 3600, (dt
/ 60) % 60, dt
% 60);
268 printf("RTH|%12s|%12s|%12s\n",
269 "---------cpu","ctx switches","-------total");
270 #else /* !CONFIG_SMP */
271 printf("RTH|%12s|%12s\n", "ctx switches","-------total");
272 #endif /* !CONFIG_SMP */
276 printf("RTD|%12u|%12lu|%12lu\n", cpu
->index
,
277 switches_count
- cpu
->last_switches_count
, switches_count
);
278 #else /* !CONFIG_SMP */
279 printf("RTD|%12lu|%12lu\n",
280 switches_count
- cpu
->last_switches_count
, switches_count
);
281 #endif /* !CONFIG_SMP */
283 pthread_cleanup_pop(1);
284 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS
, NULL
);
286 cpu
->last_switches_count
= switches_count
;
289 static void *sleeper_switcher(void *cookie
)
291 struct task_params
*param
= (struct task_params
*) cookie
;
292 unsigned to
, tasks_count
= param
->cpu
->tasks_count
;
293 struct timespec ts
, last
;
294 int fd
= param
->cpu
->fd
;
295 struct rttst_swtest_dir rtsw
;
297 unsigned i
= 1; /* Start at 1 to avoid returning to a
298 non-existing task. */
302 CPU_SET(param
->cpu
->index
, &cpu_set
);
303 if (smp_sched_setaffinity(0, sizeof(cpu_set
), &cpu_set
)) {
304 perror("sleeper: sched_setaffinity");
305 clean_exit(EXIT_FAILURE
);
308 rtsw
.from
= param
->swt
.index
;
309 to
= param
->swt
.index
;
312 ts
.tv_nsec
= 1000000;
314 ret
= __real_sem_wait(&sleeper_start
);
316 fprintf(stderr
, "sem_wait FAILED (%d)\n", errno
);
321 clock_gettime(CLOCK_REALTIME
, &last
);
323 /* ioctl is not a cancellation point, but we want cancellation to be
324 allowed when suspended in ioctl. */
325 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS
, NULL
);
328 struct timespec now
, diff
;
329 unsigned expected
, fp_val
;
331 if (param
->type
== SLEEPER
)
332 __real_nanosleep(&ts
, NULL
);
334 clock_gettime(CLOCK_REALTIME
, &now
);
336 timespec_substract(&diff
, &now
, &last
);
337 if (diff
.tv_sec
>= 1) {
340 display_switches_count(param
->cpu
, &now
);
343 if (tasks_count
== 1)
348 /* to == from means "return to last task" */
353 if (++to
== rtsw
.from
)
355 if (to
> tasks_count
- 1)
361 /* If i % 3 == 2, repeat the same switch. */
364 expected
= rtsw
.from
* 1000 + i
* 1000000;
365 if (param
->fp
& UFPS
)
366 fp_regs_set(expected
);
367 err
= ioctl(fd
, RTTST_RTIOC_SWTEST_SWITCH_TO
, &rtsw
);
368 while (err
== -1 && errno
== EINTR
)
369 err
= ioctl(fd
, RTTST_RTIOC_SWTEST_PEND
, ¶m
->swt
);
375 handle_bad_fpreg(param
->cpu
, ~0);
377 clean_exit(EXIT_FAILURE
);
379 if (param
->fp
& UFPS
) {
380 fp_val
= fp_regs_check(expected
);
381 if (fp_val
!= expected
)
382 handle_bad_fpreg(param
->cpu
, fp_val
);
385 if(++i
== 4000000000U)
393 static double dot(volatile double *a
, volatile double *b
, int n
)
403 static void *fpu_stress(void *cookie
)
405 static volatile double a
[10000], b
[sizeof(a
)/sizeof(a
[0])];
406 struct task_params
*param
= (struct task_params
*) cookie
;
411 CPU_SET(param
->cpu
->index
, &cpu_set
);
412 if (smp_sched_setaffinity(0, sizeof(cpu_set
), &cpu_set
)) {
413 perror("sleeper: sched_setaffinity");
414 clean_exit(EXIT_FAILURE
);
417 for (i
= 0; i
< sizeof(a
)/sizeof(a
[0]); i
++)
421 double s
= dot(a
, b
, sizeof(a
)/sizeof(a
[0]));
422 if ((unsigned) (s
+ 0.5) != 98596) {
423 fprintf(stderr
, "fpu stress task failure! dot: %g\n", s
);
424 clean_exit(EXIT_FAILURE
);
426 pthread_testcancel();
432 static void *rtup(void *cookie
)
434 struct task_params
*param
= (struct task_params
*) cookie
;
435 unsigned to
, tasks_count
= param
->cpu
->tasks_count
;
436 int err
, fd
= param
->cpu
->fd
;
437 struct rttst_swtest_dir rtsw
;
442 CPU_SET(param
->cpu
->index
, &cpu_set
);
443 if (smp_sched_setaffinity(0, sizeof(cpu_set
), &cpu_set
)) {
444 perror("rtup: sched_setaffinity");
445 clean_exit(EXIT_FAILURE
);
448 rtsw
.from
= param
->swt
.index
;
449 to
= param
->swt
.index
;
451 /* ioctl is not a cancellation point, but we want cancellation to be
452 allowed when suspended in ioctl. */
453 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS
, NULL
);
455 if ((err
= pthread_set_mode_np(0, PTHREAD_PRIMARY
))) {
457 "rtup: pthread_set_mode_np: %s\n",
459 clean_exit(EXIT_FAILURE
);
463 err
= ioctl(fd
, RTTST_RTIOC_SWTEST_PEND
, ¶m
->swt
);
464 } while (err
== -1 && errno
== EINTR
);
470 unsigned expected
, fp_val
;
474 /* to == from means "return to last task" */
479 if (++to
== rtsw
.from
)
481 if (to
> tasks_count
- 1)
487 /* If i % 3 == 2, repeat the same switch. */
490 expected
= rtsw
.from
* 1000 + i
* 1000000;
491 if (param
->fp
& UFPP
)
492 fp_regs_set(expected
);
493 err
= ioctl(fd
, RTTST_RTIOC_SWTEST_SWITCH_TO
, &rtsw
);
494 while (err
== -1 && errno
== EINTR
)
495 err
= ioctl(fd
, RTTST_RTIOC_SWTEST_PEND
, ¶m
->swt
);
501 handle_bad_fpreg(param
->cpu
, ~0);
503 clean_exit(EXIT_FAILURE
);
505 if (param
->fp
& UFPP
) {
506 fp_val
= fp_regs_check(expected
);
507 if (fp_val
!= expected
)
508 handle_bad_fpreg(param
->cpu
, fp_val
);
511 if(++i
== 4000000000U)
518 static void *rtus(void *cookie
)
520 struct task_params
*param
= (struct task_params
*) cookie
;
521 unsigned to
, tasks_count
= param
->cpu
->tasks_count
;
522 int err
, fd
= param
->cpu
->fd
;
523 struct rttst_swtest_dir rtsw
;
528 CPU_SET(param
->cpu
->index
, &cpu_set
);
529 if (smp_sched_setaffinity(0, sizeof(cpu_set
), &cpu_set
)) {
530 perror("rtus: sched_setaffinity");
531 clean_exit(EXIT_FAILURE
);
534 rtsw
.from
= param
->swt
.index
;
535 to
= param
->swt
.index
;
537 /* ioctl is not a cancellation point, but we want cancellation to be
538 allowed when suspended in ioctl. */
539 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS
, NULL
);
541 if ((err
= pthread_set_mode_np(PTHREAD_PRIMARY
, 0))) {
543 "rtus: pthread_set_mode_np: %s\n",
545 clean_exit(EXIT_FAILURE
);
549 err
= ioctl(fd
, RTTST_RTIOC_SWTEST_PEND
, ¶m
->swt
);
550 } while (err
== -1 && errno
== EINTR
);
556 unsigned expected
, fp_val
;
560 /* to == from means "return to last task" */
565 if (++to
== rtsw
.from
)
567 if (to
> tasks_count
- 1)
573 /* If i % 3 == 2, repeat the same switch. */
576 expected
= rtsw
.from
* 1000 + i
* 1000000;
577 if (param
->fp
& UFPS
)
578 fp_regs_set(expected
);
579 err
= ioctl(fd
, RTTST_RTIOC_SWTEST_SWITCH_TO
, &rtsw
);
580 while (err
== -1 && errno
== EINTR
)
581 err
= ioctl(fd
, RTTST_RTIOC_SWTEST_PEND
, ¶m
->swt
);
587 handle_bad_fpreg(param
->cpu
, ~0);
589 clean_exit(EXIT_FAILURE
);
591 if (param
->fp
& UFPS
) {
592 fp_val
= fp_regs_check(expected
);
593 if (fp_val
!= expected
)
594 handle_bad_fpreg(param
->cpu
, fp_val
);
597 if(++i
== 4000000000U)
604 static void *rtuo(void *cookie
)
606 struct task_params
*param
= (struct task_params
*) cookie
;
607 unsigned mode
, to
, tasks_count
= param
->cpu
->tasks_count
;
608 int err
, fd
= param
->cpu
->fd
;
609 struct rttst_swtest_dir rtsw
;
614 CPU_SET(param
->cpu
->index
, &cpu_set
);
615 if (smp_sched_setaffinity(0, sizeof(cpu_set
), &cpu_set
)) {
616 perror("rtuo: sched_setaffinity");
617 clean_exit(EXIT_FAILURE
);
620 rtsw
.from
= param
->swt
.index
;
621 to
= param
->swt
.index
;
623 /* ioctl is not a cancellation point, but we want cancellation to be
624 allowed when suspended in ioctl. */
625 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS
, NULL
);
627 if ((err
= pthread_set_mode_np(0, PTHREAD_PRIMARY
))) {
629 "rtup: pthread_set_mode_np: %s\n",
631 clean_exit(EXIT_FAILURE
);
634 err
= ioctl(fd
, RTTST_RTIOC_SWTEST_PEND
, ¶m
->swt
);
635 } while (err
== -1 && errno
== EINTR
);
640 mode
= PTHREAD_PRIMARY
;
642 unsigned expected
, fp_val
;
646 /* to == from means "return to last task" */
651 if (++to
== rtsw
.from
)
653 if (to
> tasks_count
- 1)
659 /* If i % 3 == 2, repeat the same switch. */
662 expected
= rtsw
.from
* 1000 + i
* 1000000;
663 if ((mode
&& param
->fp
& UFPP
) || (!mode
&& param
->fp
& UFPS
))
664 fp_regs_set(expected
);
665 err
= ioctl(fd
, RTTST_RTIOC_SWTEST_SWITCH_TO
, &rtsw
);
666 while (err
== -1 && errno
== EINTR
)
667 err
= ioctl(fd
, RTTST_RTIOC_SWTEST_PEND
, ¶m
->swt
);
673 handle_bad_fpreg(param
->cpu
, ~0);
675 clean_exit(EXIT_FAILURE
);
678 if ((mode
&& param
->fp
& UFPP
) || (!mode
&& param
->fp
& UFPS
)) {
679 fp_val
= fp_regs_check(expected
);
680 if (fp_val
!= expected
)
681 handle_bad_fpreg(param
->cpu
, fp_val
);
688 next_mode
= PTHREAD_PRIMARY
- mode
;
692 if ((mode
&& param
->fp
& UFPP
) ||
693 (!mode
&& param
->fp
& UFPS
))
694 fp_regs_set(expected
);
696 if ((err
= pthread_set_mode_np(mode
, next_mode
))) {
698 "rtuo: pthread_set_mode_np: %s\n",
700 clean_exit(EXIT_FAILURE
);
703 if ((mode
&& param
->fp
& UFPP
) ||
704 (!mode
&& param
->fp
& UFPS
)) {
705 fp_val
= fp_regs_check(expected
);
706 if (fp_val
!= expected
)
707 handle_bad_fpreg(param
->cpu
, fp_val
);
713 if(++i
== 4000000000U)
720 static int parse_arg(struct task_params
*param
,
722 struct cpu_tasks
*cpus
)
729 static struct t2f type2flags
[] = {
736 static struct t2f fp2flags
[] = {
746 param
->type
= param
->fp
= 0;
747 param
->cpu
= &cpus
[0];
749 for(i
= 0; i
< sizeof(type2flags
)/sizeof(struct t2f
); i
++) {
750 size_t len
= strlen(type2flags
[i
].text
);
752 if(!strncmp(text
, type2flags
[i
].text
, len
)) {
753 param
->type
= type2flags
[i
].flag
;
768 for(i
= 0; i
< sizeof(fp2flags
)/sizeof(struct t2f
); i
++) {
769 size_t len
= strlen(fp2flags
[i
].text
);
771 if(!strncmp(text
, fp2flags
[i
].text
, len
)) {
772 param
->fp
|= fp2flags
[i
].flag
;
782 cpu
= strtoul(text
, &cpu_end
, 0);
784 if (*cpu_end
!= '\0' || (cpu
== ULONG_MAX
&& errno
))
787 param
->cpu
= &cpus
[cpu
];
791 static int check_arg(const struct task_params
*param
, struct cpu_tasks
*end_cpu
)
793 if (param
->cpu
> end_cpu
- 1)
796 switch (param
->type
) {
803 if (param
->fp
& UFPS
)
808 if (param
->fp
& (AFP
|UFPS
))
813 if (param
->fp
& (AFP
|UFPP
))
828 static int task_create(struct cpu_tasks
*cpu
,
829 struct task_params
*param
,
830 pthread_attr_t
*rt_attr
)
833 typedef void *thread_routine(void *);
834 thread_routine
*task_routine
[] = {
841 switch(param
->type
) {
843 param
->swt
.flags
= (param
->fp
& AFP
? RTTST_SWTEST_FPU
: 0)
844 | (param
->fp
& UFPP
? RTTST_SWTEST_USE_FPU
: 0)
845 | (freeze_on_error
? RTTST_SWTEST_FREEZE
: 0);
847 err
=ioctl(cpu
->fd
,RTTST_RTIOC_SWTEST_CREATE_KTASK
,¶m
->swt
);
849 perror("ioctl(RTTST_RTIOC_SWTEST_CREATE_KTASK)");
859 param
->swt
.flags
= 0;
861 err
=ioctl(cpu
->fd
,RTTST_RTIOC_SWTEST_REGISTER_UTASK
,¶m
->swt
);
863 perror("ioctl(RTTST_RTIOC_SWTEST_REGISTER_UTASK)");
872 fprintf(stderr
, "Invalid task type %d. Aborting\n", param
->type
);
876 if (param
->type
== RTK
)
879 if (param
->type
== SLEEPER
|| param
->type
== SWITCHER
) {
882 pthread_attr_init(&attr
);
883 pthread_attr_setstacksize(&attr
, SMALL_STACK_MIN
);
885 err
= __real_pthread_create(¶m
->thread
,
890 pthread_attr_destroy(&attr
);
893 fprintf(stderr
,"pthread_create: %s\n",strerror(err
));
899 if (param
->type
== FPU_STRESS
) {
902 pthread_attr_init(&attr
);
903 pthread_attr_setstacksize(&attr
, LARGE_STACK_MIN
);
905 err
= __real_pthread_create(¶m
->thread
,
910 pthread_attr_destroy(&attr
);
913 fprintf(stderr
,"pthread_create: %s\n",strerror(err
));
919 err
= pthread_create(¶m
->thread
, rt_attr
,
920 task_routine
[param
->type
], param
);
922 fprintf(stderr
, "pthread_create: %s\n", strerror(err
));
926 err
= pthread_set_name_np(param
->thread
,
927 task_name(buffer
, sizeof(buffer
),
928 param
->cpu
,param
->swt
.index
));
931 fprintf(stderr
,"pthread_set_name_np: %s\n", strerror(err
));
936 #define DEV_NR_MAX 256
938 static int open_rttest(char *buf
, size_t size
, unsigned count
)
940 static unsigned dev_nr
= 0;
944 snprintf(buf
, size
, "/dev/rttest-switchtest%d", dev_nr
);
946 status
= fd
= open(buf
, O_RDWR
);
951 status
= ioctl(fd
, RTTST_RTIOC_SWTEST_SET_TASKS_COUNT
, count
);
956 if (errno
!= ENOSYS
&& errno
!= ENOTTY
) {
957 fprintf(stderr
, "switchtest: open: %m\n");
965 if (++dev_nr
!= DEV_NR_MAX
)
968 fprintf(stderr
, "switchtest: Unable to open switchtest device.\n"
969 "(modprobe xeno_switchtest ?)\n");
972 } while (status
== -1);
977 const char *all_nofp
[] = {
988 const char *all_fp
[] = {
/*
 * Parse @str as an unsigned long, accepting the usual strtoul bases
 * (leading "0x" for hexadecimal, leading "0" for octal, decimal
 * otherwise). On overflow or if trailing non-numeric characters remain,
 * print a diagnostic and terminate the program.
 */
unsigned long xatoul(const char *str)
{
	unsigned long result;
	char *endptr;

	/*
	 * strtoul only sets errno on failure, so clear any stale ERANGE
	 * left by an earlier call before testing it below.
	 */
	errno = 0;
	result = strtoul(str, &endptr, 0);

	if (result == ULONG_MAX && errno == ERANGE) {
		fprintf(stderr, "Overflow while parsing %s\n", str);
		exit(EXIT_FAILURE);
	}

	if (*endptr != '\0') {
		fprintf(stderr, "Error while parsing \"%s\" as a number\n", str);
		exit(EXIT_FAILURE);
	}

	return result;
}
1033 void usage(FILE *fd
, const char *progname
)
1035 unsigned i
, j
, nr_cpus
;
1038 nr_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
1039 #else /* !CONFIG_SMP */
1041 #endif /* !CONFIG_SMP */
1045 "%s [options] threadspec threadspec...\n"
1046 "Create threads of various types and attempt to switch context "
1047 "between these\nthreads, printing the count of context switches "
1049 "Available options are:\n"
1050 "--help or -h, cause this program to print this help string and "
1052 "--lines <lines> or -l <lines> print headers every <lines> "
1054 "--quiet or -q, prevent this program from printing every "
1055 "second the count of\ncontext switches;\n"
1056 "--timeout <duration> or -T <duration>, limit the test duration "
1057 "to <duration>\nseconds;\n"
1058 "--nofpu or -n, disables any use of FPU instructions.\n"
1059 "--stress <period> or -s <period> enable a stress mode where:\n"
1060 " context switches occur every <period> us;\n"
1061 " a background task uses fpu (and check) fpu all the time.\n"
1062 "--freeze trace upon error.\n\n"
1063 "Each 'threadspec' specifies the characteristics of a "
1064 "thread to be created:\n"
1065 "threadspec = (rtk|rtup|rtus|rtuo)(_fp|_ufpp|_ufps)*[0-9]*\n"
1066 "rtk for a kernel-space real-time thread;\n"
1067 "rtup for a user-space real-time thread running in primary"
1069 "rtus for a user-space real-time thread running in secondary"
1071 "rtuo for a user-space real-time thread oscillating between"
1072 " primary and\nsecondary mode,\n\n"
1073 "_fp means that the created thread will have the XNFPU bit"
1074 " armed (only valid for\nrtk),\n"
1075 "_ufpp means that the created thread will use the FPU when in "
1076 "primary mode\n(invalid for rtus),\n"
1077 "_ufps means that the created thread will use the FPU when in "
1078 "secondary mode\n(invalid for rtk and rtup),\n\n"
1079 "[0-9]* specifies the ID of the CPU where the created thread "
1080 "will run, 0 if\nunspecified.\n\n"
1081 "Passing no 'threadspec' is equivalent to running:\n%s",
1082 progname
, progname
);
1084 for (i
= 0; i
< nr_cpus
; i
++)
1085 for (j
= 0; j
< sizeof(all_fp
)/sizeof(char *); j
++)
1086 fprintf(fd
, " %s%d", all_fp
[j
], i
);
1089 "\n\nPassing only the --nofpu or -n argument is equivalent to "
1090 "running:\n%s", progname
);
1092 for (i
= 0; i
< nr_cpus
; i
++)
1093 for (j
= 0; j
< sizeof(all_nofp
)/sizeof(char *); j
++)
1094 fprintf(fd
, " %s%d", all_nofp
[j
], i
);
1095 fprintf(fd
, "\n\n");
/*
 * SIGILL handler armed while probing whether the FPU check routines are
 * usable (see the sigsetjmp() in check_fpu_thread): restore the default
 * disposition first, so a second SIGILL is fatal rather than looping,
 * then take a non-local exit back to the sigsetjmp() site.
 */
1100 void illegal_instruction(int sig
)
1102 signal(sig
, SIG_DFL
);
1103 siglongjmp(jump
, 1);
1106 /* We run the FPU check in a thread to avoid clobbering the main thread FPU
1107 backup area. This is important on x86, where this results on all RT threads
1108 FPU backup areas to be clobbered, and thus their FPU context being switched
1109 systematically (and the case where FPU has never been used not to be tested). */
1110 void *check_fpu_thread(void *cookie
)
1114 /* Check if fp routines are dummy or if hw fpu is not supported. */
1115 fprintf(stderr
, "== Testing FPU check routines...\n");
1116 if(sigsetjmp(jump
, 1)) {
1117 fprintf(stderr
, "== Hardware FPU not available on your board"
1118 " or not enabled in Linux kernel\n== configuration:"
1119 " skipping FPU switches tests.\n");
1122 signal(SIGILL
, illegal_instruction
);
1124 check
= fp_regs_check(2);
1125 signal(SIGILL
, SIG_DFL
);
1128 "== FPU check routines: unimplemented, "
1129 "skipping FPU switches tests.\n");
1133 fprintf(stderr
, "== FPU check routines: OK.\n");
1143 err
= __real_pthread_create(&tid
, NULL
, check_fpu_thread
, NULL
);
1145 fprintf(stderr
, "pthread_create: %s\n", strerror(err
));
1149 err
= pthread_join(tid
, &status
);
1151 fprintf(stderr
, "pthread_join: %s\n", strerror(err
));
1155 return (long) status
;
1158 int main(int argc
, const char *argv
[])
1160 unsigned i
, j
, nr_cpus
, use_fp
= 1, stress
= 0;
1161 pthread_attr_t rt_attr
;
1162 const char *progname
= argv
[0];
1163 struct cpu_tasks
*cpus
;
1164 struct sched_param sp
;
1165 char devname
[RTDM_MAX_DEVNAME_LEN
+1];
1169 status
= EXIT_SUCCESS
;
1171 /* Initializations. */
1172 if (mlockall(MCL_CURRENT
|MCL_FUTURE
)) {
1177 if (__real_sem_init(&sleeper_start
, 0, 0)) {
1183 nr_cpus
= sysconf(_SC_NPROCESSORS_ONLN
);
1184 #else /* !CONFIG_SMP */
1186 #endif /* !CONFIG_SMP */
1188 if (nr_cpus
== -1) {
1190 "Error %d while getting the number of cpus (%s)\n",
1198 /* Parse command line options. */
1201 static struct option long_options
[] = {
1202 { "freeze", 0, NULL
, 'f' },
1203 { "help", 0, NULL
, 'h' },
1204 { "lines", 1, NULL
, 'l' },
1205 { "nofpu", 0, NULL
, 'n' },
1206 { "quiet", 0, NULL
, 'q' },
1207 { "stress", 1, NULL
, 's' },
1208 { "timeout", 1, NULL
, 'T' },
1209 { NULL
, 0, NULL
, 0 }
1212 int c
= getopt_long(argc
, (char *const *) argv
, "fhl:nqs:T:",
1220 freeze_on_error
= 1;
1224 usage(stdout
, progname
);
1228 data_lines
= xatoul(optarg
);
1240 stress
= xatoul(optarg
);
1244 alarm(xatoul(optarg
));
1248 usage(stderr
, progname
);
1249 fprintf(stderr
, "%s: Invalid option.\n", argv
[optind
-1]);
1253 usage(stderr
, progname
);
1254 fprintf(stderr
, "Missing argument of option %s.\n",
1260 if (setvbuf(stdout
, NULL
, _IOLBF
, 0)) {
1265 /* If no argument was passed (or only -n), replace argc and argv with
1266 default values, given by all_fp or all_nofp depending on the presence
1268 if (optind
== argc
) {
1274 use_fp
= check_fpu();
1278 count
= sizeof(all_fp
)/sizeof(char *);
1281 count
= sizeof(all_nofp
)/sizeof(char *);
1284 argc
= count
* nr_cpus
+ 1;
1285 argv
= (const char **) malloc(argc
* sizeof(char *));
1287 for (i
= 0; i
< nr_cpus
; i
++)
1288 for (j
= 0; j
< count
; j
++) {
1294 argv
[i
* count
+ j
+ 1] = strdup(buffer
);
1300 cpus
= (struct cpu_tasks
*) malloc(sizeof(*cpus
) * nr_cpus
);
1306 for (i
= 0; i
< nr_cpus
; i
++) {
1310 cpus
[i
].capacity
= 2;
1311 size
= cpus
[i
].capacity
* sizeof(struct task_params
);
1312 cpus
[i
].tasks_count
= 1;
1313 cpus
[i
].tasks
= (struct task_params
*) malloc(size
);
1314 cpus
[i
].last_switches_count
= 0;
1316 if (!cpus
[i
].tasks
) {
1321 cpus
[i
].tasks
[0].type
= stress
? SWITCHER
: SLEEPER
;
1322 cpus
[i
].tasks
[0].fp
= use_fp
? UFPS
: 0;
1323 cpus
[i
].tasks
[0].cpu
= &cpus
[i
];
1324 cpus
[i
].tasks
[0].thread
= 0;
1325 cpus
[i
].tasks
[0].swt
.index
= cpus
[i
].tasks
[0].swt
.flags
= 0;
1329 /* Parse arguments and build data structures. */
1330 for(i
= optind
; i
< argc
; i
++) {
1331 struct task_params params
;
1332 struct cpu_tasks
*cpu
;
1334 if(parse_arg(¶ms
, argv
[i
], cpus
)) {
1335 usage(stderr
, progname
);
1336 fprintf(stderr
, "Unable to parse %s as a thread type. "
1337 "Aborting.\n", argv
[i
]);
1341 if (!check_arg(¶ms
, &cpus
[nr_cpus
])) {
1342 usage(stderr
, progname
);
1344 "Invalid parameters %s. Aborting\n",
1349 if (!use_fp
&& params
.fp
) {
1350 usage(stderr
, progname
);
1352 "%s is invalid because FPU is disabled"
1353 " (option -n passed).\n", argv
[i
]);
1358 if(++cpu
->tasks_count
> cpu
->capacity
) {
1360 cpu
->capacity
+= cpu
->capacity
/ 2;
1361 size
= cpu
->capacity
* sizeof(struct task_params
);
1363 (struct task_params
*) realloc(cpu
->tasks
, size
);
1371 params
.swt
.index
= params
.swt
.flags
= 0;
1372 cpu
->tasks
[cpu
->tasks_count
- 1] = params
;
1376 for (i
= 0; i
< nr_cpus
; i
++) {
1377 struct task_params params
;
1378 struct cpu_tasks
*cpu
= &cpus
[i
];
1380 if(cpu
->tasks_count
+ 1> cpu
->capacity
) {
1382 cpu
->capacity
+= cpu
->capacity
/ 2;
1383 size
= cpu
->capacity
* sizeof(struct task_params
);
1385 (struct task_params
*) realloc(cpu
->tasks
, size
);
1392 params
.type
= FPU_STRESS
;
1396 params
.swt
.index
= cpu
->tasks_count
;
1397 params
.swt
.flags
= 0;
1398 cpu
->tasks
[cpu
->tasks_count
] = params
;
1401 /* For best compatibility with both LinuxThreads and NPTL, block the
1402 termination signals on all threads. */
1404 sigaddset(&mask
, SIGINT
);
1405 sigaddset(&mask
, SIGTERM
);
1406 sigaddset(&mask
, SIGALRM
);
1407 pthread_sigmask(SIG_BLOCK
, &mask
, NULL
);
1409 __real_pthread_mutex_init(&headers_lock
, NULL
);
1411 /* Prepare attributes for real-time tasks. */
1412 pthread_attr_init(&rt_attr
);
1413 pthread_attr_setinheritsched(&rt_attr
, PTHREAD_EXPLICIT_SCHED
);
1414 pthread_attr_setschedpolicy(&rt_attr
, SCHED_FIFO
);
1415 sp
.sched_priority
= 1;
1416 pthread_attr_setschedparam(&rt_attr
, &sp
);
1417 pthread_attr_setstacksize(&rt_attr
, SMALL_STACK_MIN
);
1419 printf("== Threads:");
1420 /* Create and register all tasks. */
1421 for (i
= 0; i
< nr_cpus
; i
++) {
1422 struct cpu_tasks
*cpu
= &cpus
[i
];
1425 cpu
->fd
= open_rttest(devname
,sizeof(devname
),cpu
->tasks_count
);
1430 if (ioctl(cpu
->fd
, RTTST_RTIOC_SWTEST_SET_CPU
, i
)) {
1431 perror("ioctl(RTTST_RTIOC_SWTEST_SET_CPU)");
1436 ioctl(cpu
->fd
, RTTST_RTIOC_SWTEST_SET_PAUSE
, stress
)) {
1437 perror("ioctl(RTTST_RTIOC_SWTEST_SET_PAUSE)");
1441 for (j
= 0; j
< cpu
->tasks_count
+ !!stress
; j
++) {
1442 struct task_params
*param
= &cpu
->tasks
[j
];
1443 if (task_create(cpu
, param
, &rt_attr
)) {
1445 status
= EXIT_FAILURE
;
1449 task_name(buffer
, sizeof(buffer
),
1450 param
->cpu
, param
->swt
.index
));
1455 clock_gettime(CLOCK_REALTIME
, &start
);
1457 /* Start the sleeper tasks. */
1458 for (i
= 0; i
< nr_cpus
; i
++)
1459 __real_sem_post(&sleeper_start
);
1461 /* Wait for interruption. */
1462 sigwait(&mask
, &sig
);
1464 /* Allow a second Ctrl-C in case of lockup. */
1465 pthread_sigmask(SIG_UNBLOCK
, &mask
, NULL
);
1469 for (i
= 0; i
< nr_cpus
; i
++) {
1470 struct cpu_tasks
*cpu
= &cpus
[i
];
1472 /* kill the user-space tasks. */
1473 for (j
= 0; j
< cpu
->tasks_count
+ !!stress
; j
++) {
1474 struct task_params
*param
= &cpu
->tasks
[j
];
1476 if (param
->type
!= RTK
&& param
->thread
)
1477 pthread_cancel(param
->thread
);
1481 for (i
= 0; i
< nr_cpus
; i
++) {
1482 struct cpu_tasks
*cpu
= &cpus
[i
];
1484 /* join the user-space tasks. */
1485 for (j
= 0; j
< cpu
->tasks_count
+ !!stress
; j
++) {
1486 struct task_params
*param
= &cpu
->tasks
[j
];
1488 if (param
->type
!= RTK
&& param
->thread
)
1489 pthread_join(param
->thread
, NULL
);
1492 if (cpus
[i
].fd
!= -1) {
1493 struct timespec now
;
1495 clock_gettime(CLOCK_REALTIME
, &now
);
1498 display_switches_count(&cpus
[i
], &now
);
1500 /* Kill the kernel-space tasks. */
1506 __real_sem_destroy(&sleeper_start
);
1507 __real_pthread_mutex_destroy(&headers_lock
);