/*
 * Copyright (C) 2006-2013 Gilles Chanteperdrix <gch@xenomai.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

#include <sched.h>
#include <signal.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/mman.h>
#include <semaphore.h>
#include <setjmp.h>

#include <getopt.h>

#include <xeno_config.h>
#include <asm/xenomai/fptest.h>
#include <asm-generic/stack.h>
#include <nucleus/trace.h>
#include <rtdm/rttesting.h>

#ifdef HAVE_RECENT_SETAFFINITY
#define do_sched_setaffinity(pid,len,mask) sched_setaffinity(pid,len,mask)
#else /* !HAVE_RECENT_SETAFFINITY */
#ifdef HAVE_OLD_SETAFFINITY
#define do_sched_setaffinity(pid,len,mask) sched_setaffinity(pid,mask)
#else /* !HAVE_OLD_SETAFFINITY */
#ifndef __cpu_set_t_defined
typedef unsigned long cpu_set_t;
#endif
#define do_sched_setaffinity(pid,len,mask) 0
#ifndef CPU_ZERO
#define CPU_ZERO(set) do { *(set) = 0; } while(0)
#define CPU_SET(n,set) do { *(set) |= (1 << n); } while(0)
#endif
#endif /* HAVE_OLD_SETAFFINITY */
#endif /* HAVE_RECENT_SETAFFINITY */

#if CONFIG_SMP
#define smp_sched_setaffinity(pid,len,mask) do_sched_setaffinity(pid,len,mask)
#else /* !CONFIG_SMP */
#define smp_sched_setaffinity(pid,len,mask) 0
#endif /* !CONFIG_SMP */

#define SMALL_STACK_MIN xeno_stacksize(32 * 1024)
#define LARGE_STACK_MIN xeno_stacksize(64 * 1024)

/* Thread type. */
typedef enum {
        SLEEPER = 0,
        RTK = 1,                /* kernel-space thread. */
        RTUP = 2,               /* user-space real-time thread in primary mode. */
        RTUS = 3,               /* user-space real-time thread in secondary mode. */
        RTUO = 4,               /* user-space real-time thread oscillating
                                   between primary and secondary mode. */
        SWITCHER = 8,
        FPU_STRESS = 16,
} threadtype;

typedef enum {
        AFP = 1,                /* arm the FPU task bit (only makes sense for RTK) */
        UFPP = 2,               /* use the FPU while in primary mode. */
        UFPS = 4                /* use the FPU while in secondary mode. */
} fpflags;

struct cpu_tasks;

struct task_params {
        threadtype type;
        fpflags fp;
        pthread_t thread;
        struct cpu_tasks *cpu;
        struct rttst_swtest_task swt;
};

struct cpu_tasks {
        unsigned index;
        struct task_params *tasks;
        unsigned tasks_count;
        unsigned capacity;
        unsigned fd;
        unsigned long last_switches_count;
};

static sem_t sleeper_start;
static int quiet, status;
static struct timespec start;
static pthread_mutex_t headers_lock;
static unsigned long data_lines = 21;
static unsigned freeze_on_error;

static inline void clean_exit(int retval)
{
        status = retval;
        kill(getpid(), SIGTERM);
        for (;;)
                /* Wait for cancellation. */
                __real_sem_wait(&sleeper_start);
}

static void timespec_substract(struct timespec *result,
                               const struct timespec *lhs,
                               const struct timespec *rhs)
{
        result->tv_sec = lhs->tv_sec - rhs->tv_sec;
        if (lhs->tv_nsec >= rhs->tv_nsec)
                result->tv_nsec = lhs->tv_nsec - rhs->tv_nsec;
        else {
                result->tv_sec -= 1;
                result->tv_nsec = lhs->tv_nsec + (1000000000 - rhs->tv_nsec);
        }
}

static char *task_name(char *buf, size_t sz,
                       struct cpu_tasks *cpu, unsigned task)
{
        char *basename [] = {
                [SLEEPER] = "sleeper",
                [RTK] = "rtk",
                [RTUP] = "rtup",
                [RTUS] = "rtus",
                [RTUO] = "rtuo",
                [SWITCHER] = "switcher",
                [FPU_STRESS] = "fpu_stress",
        };
        struct {
                unsigned flag;
                char *name;
        } flags [] = {
                { .flag = AFP, .name = "fp" },
                { .flag = UFPP, .name = "ufpp" },
                { .flag = UFPS, .name = "ufps" },
        };
        struct task_params *param;
        unsigned pos, i;

        if (task > cpu->tasks_count)
                return "???";

        if (task == cpu->tasks_count)
                param = &cpu->tasks[task];
        else
                for (param = &cpu->tasks[0]; param->swt.index != task; param++)
                        ;

        pos = snprintf(buf, sz, "%s", basename[param->type]);
        for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
                if (!(param->fp & flags[i].flag))
                        continue;

                pos += snprintf(&buf[pos],
                                sz - pos, "_%s", flags[i].name);
        }

#ifdef CONFIG_SMP
        pos += snprintf(&buf[pos], sz - pos, "%u", cpu->index);
#endif /* !CONFIG_SMP */

        snprintf(&buf[pos], sz - pos, "-%u", param->swt.index);

        return buf;
}

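/*
 * Reading aid for the error decoding below: each test task loads the FPU
 * registers with expected = rtsw.from * 1000 + i * 1000000 before asking
 * the driver to switch away, so a corrupted value can be traced back to
 * its probable writer. handle_bad_fpreg() reverses that encoding: modulo
 * 1000000, values below 500000 presumably identify a user-space task
 * (index = value / 1000), while larger values are attributed to a task
 * which used the FPU in kernel space.
 */
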
static void handle_bad_fpreg(struct cpu_tasks *cpu, unsigned fp_val)
{
        struct rttst_swtest_error err;
        unsigned from, to;
        char buffer[64];

        if (freeze_on_error)
                xntrace_user_freeze(0, 0);

        ioctl(cpu->fd, RTTST_RTIOC_SWTEST_GET_LAST_ERROR, &err);

        if (fp_val == ~0)
                fp_val = err.fp_val;

        from = err.last_switch.from;
        to = err.last_switch.to;

        fprintf(stderr, "Error after context switch from task %d(%s) ",
                from, task_name(buffer, sizeof(buffer), cpu, from));
        fprintf(stderr, "to task %d(%s),\nFPU registers were set to %u ",
                to, task_name(buffer, sizeof(buffer), cpu, to), fp_val);
        fp_val %= 1000000;
        if (fp_val < 500000)
                fprintf(stderr, "(maybe task %s)\n",
                        task_name(buffer, sizeof(buffer), cpu, fp_val / 1000));
        else {
                fp_val -= 500000;
                if (fp_val > cpu->tasks_count)
                        fprintf(stderr, "(unidentified task)\n");
                else
                        fprintf(stderr, "(maybe task %s, having used fpu in "
                                "kernel-space)\n",
                                task_name(buffer, sizeof(buffer), cpu, fp_val / 1000));
        }

        clean_exit(EXIT_FAILURE);
}

void display_cleanup(void *cookie)
{
        pthread_mutex_t *mutex = (pthread_mutex_t *) cookie;
        __real_pthread_mutex_unlock(mutex);
}

void display_switches_count(struct cpu_tasks *cpu, struct timespec *now)
{
        unsigned long switches_count;
        static unsigned nlines = 0;

        if (ioctl(cpu->fd,
                  RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT, &switches_count)) {
                perror("sleeper: ioctl(RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT)");
                clean_exit(EXIT_FAILURE);
        }

        if (switches_count &&
            switches_count == cpu->last_switches_count) {
                fprintf(stderr, "No context switches during one second, "
                        "aborting.\n");
                clean_exit(EXIT_FAILURE);
        }

        if (quiet)
                return;

        pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
        pthread_cleanup_push(display_cleanup, &headers_lock);
        __real_pthread_mutex_lock(&headers_lock);

        if (data_lines && (nlines++ % data_lines) == 0) {
                struct timespec diff;
                long dt;

                timespec_substract(&diff, now, &start);
                dt = diff.tv_sec;

                printf("RTT| %.2ld:%.2ld:%.2ld\n",
                       dt / 3600, (dt / 60) % 60, dt % 60);
#ifdef CONFIG_SMP
                printf("RTH|%12s|%12s|%12s\n",
                       "---------cpu","ctx switches","-------total");
#else /* !CONFIG_SMP */
                printf("RTH|%12s|%12s\n", "ctx switches","-------total");
#endif /* !CONFIG_SMP */
        }

#ifdef CONFIG_SMP
        printf("RTD|%12u|%12lu|%12lu\n", cpu->index,
               switches_count - cpu->last_switches_count, switches_count);
#else /* !CONFIG_SMP */
        printf("RTD|%12lu|%12lu\n",
               switches_count - cpu->last_switches_count, switches_count);
#endif /* !CONFIG_SMP */

        pthread_cleanup_pop(1);
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

        cpu->last_switches_count = switches_count;
}

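/*
 * With the format strings above, the periodic report on an SMP build looks
 * roughly like the following (figures are made up for illustration):
 *
 *   RTT| 00:00:01
 *   RTH|---------cpu|ctx switches|-------total
 *   RTD|           0|        1234|        1234
 */
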
static void *sleeper_switcher(void *cookie)
{
        struct task_params *param = (struct task_params *) cookie;
        unsigned to, tasks_count = param->cpu->tasks_count;
        struct timespec ts, last;
        int fd = param->cpu->fd;
        struct rttst_swtest_dir rtsw;
        cpu_set_t cpu_set;
        unsigned i = 1;         /* Start at 1 to avoid returning to a
                                   non-existing task. */
        int ret;

        CPU_ZERO(&cpu_set);
        CPU_SET(param->cpu->index, &cpu_set);
        if (smp_sched_setaffinity(0, sizeof(cpu_set), &cpu_set)) {
                perror("sleeper: sched_setaffinity");
                clean_exit(EXIT_FAILURE);
        }

        rtsw.from = param->swt.index;
        to = param->swt.index;

        ts.tv_sec = 0;
        ts.tv_nsec = 1000000;

        ret = __real_sem_wait(&sleeper_start);
        if (ret) {
                fprintf(stderr, "sem_wait FAILED (%d)\n", errno);
                fflush(stderr);
                exit(77);
        }

        clock_gettime(CLOCK_REALTIME, &last);

        /* ioctl is not a cancellation point, but we want cancellation to be
           allowed when suspended in ioctl. */
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

        for (;;) {
                struct timespec now, diff;
                unsigned expected, fp_val;
                int err;
                if (param->type == SLEEPER)
                        __real_nanosleep(&ts, NULL);

                clock_gettime(CLOCK_REALTIME, &now);

                timespec_substract(&diff, &now, &last);
                if (diff.tv_sec >= 1) {
                        last = now;

                        display_switches_count(param->cpu, &now);
                }

                if (tasks_count == 1)
                        continue;

                switch (i % 3) {
                case 0:
                        /* to == from means "return to last task" */
                        rtsw.to = rtsw.from;
                        break;

                case 1:
                        if (++to == rtsw.from)
                                ++to;
                        if (to > tasks_count - 1)
                                to = 0;
                        if (to == rtsw.from)
                                ++to;
                        rtsw.to = to;

                        /* If i % 3 == 2, repeat the same switch. */
                }

                expected = rtsw.from * 1000 + i * 1000000;
                if (param->fp & UFPS)
                        fp_regs_set(expected);
                err = ioctl(fd, RTTST_RTIOC_SWTEST_SWITCH_TO, &rtsw);
                while (err == -1 && errno == EINTR)
                        err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);

                switch (err) {
                case 0:
                        break;
                case 1:
                        handle_bad_fpreg(param->cpu, ~0);
                case -1:
                        clean_exit(EXIT_FAILURE);
                }

                if (param->fp & UFPS) {
                        fp_val = fp_regs_check(expected);
                        if (fp_val != expected)
                                handle_bad_fpreg(param->cpu, fp_val);
                }

                if(++i == 4000000000U)
                        i = 0;
        }

        return NULL;
}

static double dot(volatile double *a, volatile double *b, int n)
{
        int k = n - 1;
        double s = 0.0;
        for(; k >= 0; k--)
                s = s + a[k]*b[k];

        return s;
}

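/*
 * fpu_stress() below fills both vectors with 3.14, so the expected dot
 * product is 10000 * 3.14 * 3.14 = 98596, which is the constant checked
 * against after rounding.
 */
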
static void *fpu_stress(void *cookie)
{
        static volatile double a[10000], b[sizeof(a)/sizeof(a[0])];
        struct task_params *param = (struct task_params *) cookie;
        cpu_set_t cpu_set;
        unsigned i;

        CPU_ZERO(&cpu_set);
        CPU_SET(param->cpu->index, &cpu_set);
        if (smp_sched_setaffinity(0, sizeof(cpu_set), &cpu_set)) {
                perror("sleeper: sched_setaffinity");
                clean_exit(EXIT_FAILURE);
        }

        for (i = 0; i < sizeof(a)/sizeof(a[0]); i++)
                a[i] = b[i] = 3.14;

        for (;;) {
                double s = dot(a, b, sizeof(a)/sizeof(a[0]));
                if ((unsigned) (s + 0.5) != 98596) {
                        fprintf(stderr, "fpu stress task failure! dot: %g\n", s);
                        clean_exit(EXIT_FAILURE);
                }
                pthread_testcancel();
        }

        return NULL;
}

static void *rtup(void *cookie)
{
        struct task_params *param = (struct task_params *) cookie;
        unsigned to, tasks_count = param->cpu->tasks_count;
        int err, fd = param->cpu->fd;
        struct rttst_swtest_dir rtsw;
        cpu_set_t cpu_set;
        unsigned i = 0;

        CPU_ZERO(&cpu_set);
        CPU_SET(param->cpu->index, &cpu_set);
        if (smp_sched_setaffinity(0, sizeof(cpu_set), &cpu_set)) {
                perror("rtup: sched_setaffinity");
                clean_exit(EXIT_FAILURE);
        }

        rtsw.from = param->swt.index;
        to = param->swt.index;

        /* ioctl is not a cancellation point, but we want cancellation to be
           allowed when suspended in ioctl. */
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

        if ((err = pthread_set_mode_np(0, PTHREAD_PRIMARY))) {
                fprintf(stderr,
                        "rtup: pthread_set_mode_np: %s\n",
                        strerror(err));
                clean_exit(EXIT_FAILURE);
        }

        do {
                err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);
        } while (err == -1 && errno == EINTR);

        if (err == -1)
                return NULL;

        for (;;) {
                unsigned expected, fp_val;

                switch (i % 3) {
                case 0:
                        /* to == from means "return to last task" */
                        rtsw.to = rtsw.from;
                        break;

                case 1:
                        if (++to == rtsw.from)
                                ++to;
                        if (to > tasks_count - 1)
                                to = 0;
                        if (to == rtsw.from)
                                ++to;
                        rtsw.to = to;

                        /* If i % 3 == 2, repeat the same switch. */
                }

                expected = rtsw.from * 1000 + i * 1000000;
                if (param->fp & UFPP)
                        fp_regs_set(expected);
                err = ioctl(fd, RTTST_RTIOC_SWTEST_SWITCH_TO, &rtsw);
                while (err == -1 && errno == EINTR)
                        err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);

                switch (err) {
                case 0:
                        break;
                case 1:
                        handle_bad_fpreg(param->cpu, ~0);
                case -1:
                        clean_exit(EXIT_FAILURE);
                }

                if (param->fp & UFPP) {
                        fp_val = fp_regs_check(expected);
                        if (fp_val != expected)
                                handle_bad_fpreg(param->cpu, fp_val);
                }

                if(++i == 4000000000U)
                        i = 0;
        }

        return NULL;
}

static void *rtus(void *cookie)
{
        struct task_params *param = (struct task_params *) cookie;
        unsigned to, tasks_count = param->cpu->tasks_count;
        int err, fd = param->cpu->fd;
        struct rttst_swtest_dir rtsw;
        cpu_set_t cpu_set;
        unsigned i = 0;

        CPU_ZERO(&cpu_set);
        CPU_SET(param->cpu->index, &cpu_set);
        if (smp_sched_setaffinity(0, sizeof(cpu_set), &cpu_set)) {
                perror("rtus: sched_setaffinity");
                clean_exit(EXIT_FAILURE);
        }

        rtsw.from = param->swt.index;
        to = param->swt.index;

        /* ioctl is not a cancellation point, but we want cancellation to be
           allowed when suspended in ioctl. */
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

        if ((err = pthread_set_mode_np(PTHREAD_PRIMARY, 0))) {
                fprintf(stderr,
                        "rtus: pthread_set_mode_np: %s\n",
                        strerror(err));
                clean_exit(EXIT_FAILURE);
        }

        do {
                err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);
        } while (err == -1 && errno == EINTR);

        if (err == -1)
                return NULL;

        for (;;) {
                unsigned expected, fp_val;

                switch (i % 3) {
                case 0:
                        /* to == from means "return to last task" */
                        rtsw.to = rtsw.from;
                        break;

                case 1:
                        if (++to == rtsw.from)
                                ++to;
                        if (to > tasks_count - 1)
                                to = 0;
                        if (to == rtsw.from)
                                ++to;
                        rtsw.to = to;

                        /* If i % 3 == 2, repeat the same switch. */
                }

                expected = rtsw.from * 1000 + i * 1000000;
                if (param->fp & UFPS)
                        fp_regs_set(expected);
                err = ioctl(fd, RTTST_RTIOC_SWTEST_SWITCH_TO, &rtsw);
                while (err == -1 && errno == EINTR)
                        err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);

                switch (err) {
                case 0:
                        break;
                case 1:
                        handle_bad_fpreg(param->cpu, ~0);
                case -1:
                        clean_exit(EXIT_FAILURE);
                }

                if (param->fp & UFPS) {
                        fp_val = fp_regs_check(expected);
                        if (fp_val != expected)
                                handle_bad_fpreg(param->cpu, fp_val);
                }

                if(++i == 4000000000U)
                        i = 0;
        }

        return NULL;
}

static void *rtuo(void *cookie)
{
        struct task_params *param = (struct task_params *) cookie;
        unsigned mode, to, tasks_count = param->cpu->tasks_count;
        int err, fd = param->cpu->fd;
        struct rttst_swtest_dir rtsw;
        cpu_set_t cpu_set;
        unsigned i = 0;

        CPU_ZERO(&cpu_set);
        CPU_SET(param->cpu->index, &cpu_set);
        if (smp_sched_setaffinity(0, sizeof(cpu_set), &cpu_set)) {
                perror("rtuo: sched_setaffinity");
                clean_exit(EXIT_FAILURE);
        }

        rtsw.from = param->swt.index;
        to = param->swt.index;

        /* ioctl is not a cancellation point, but we want cancellation to be
           allowed when suspended in ioctl. */
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

        if ((err = pthread_set_mode_np(0, PTHREAD_PRIMARY))) {
                fprintf(stderr,
                        "rtuo: pthread_set_mode_np: %s\n",
                        strerror(err));
                clean_exit(EXIT_FAILURE);
        }

        do {
                err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);
        } while (err == -1 && errno == EINTR);

        if (err == -1)
                return NULL;

        mode = PTHREAD_PRIMARY;
        for (;;) {
                unsigned expected, fp_val;

                switch (i % 3) {
                case 0:
                        /* to == from means "return to last task" */
                        rtsw.to = rtsw.from;
                        break;

                case 1:
                        if (++to == rtsw.from)
                                ++to;
                        if (to > tasks_count - 1)
                                to = 0;
                        if (to == rtsw.from)
                                ++to;
                        rtsw.to = to;

                        /* If i % 3 == 2, repeat the same switch. */
                }

                expected = rtsw.from * 1000 + i * 1000000;
                if ((mode && param->fp & UFPP) || (!mode && param->fp & UFPS))
                        fp_regs_set(expected);
                err = ioctl(fd, RTTST_RTIOC_SWTEST_SWITCH_TO, &rtsw);
                while (err == -1 && errno == EINTR)
                        err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);

                switch (err) {
                case 0:
                        break;
                case 1:
                        handle_bad_fpreg(param->cpu, ~0);
                case -1:
                        clean_exit(EXIT_FAILURE);
                }

                if ((mode && param->fp & UFPP) || (!mode && param->fp & UFPS)) {
                        fp_val = fp_regs_check(expected);
                        if (fp_val != expected)
                                handle_bad_fpreg(param->cpu, fp_val);
                }

                /* Switch mode. */
                if (i % 3 == 2) {
                        unsigned next_mode;

                        next_mode = PTHREAD_PRIMARY - mode;

                        expected += 128;

                        if ((mode && param->fp & UFPP) ||
                            (!mode && param->fp & UFPS))
                                fp_regs_set(expected);

                        if ((err = pthread_set_mode_np(mode, next_mode))) {
                                fprintf(stderr,
                                        "rtuo: pthread_set_mode_np: %s\n",
                                        strerror(err));
                                clean_exit(EXIT_FAILURE);
                        }

                        if ((mode && param->fp & UFPP) ||
                            (!mode && param->fp & UFPS)) {
                                fp_val = fp_regs_check(expected);
                                if (fp_val != expected)
                                        handle_bad_fpreg(param->cpu, fp_val);
                        }

                        mode = next_mode;
                }

                if(++i == 4000000000U)
                        i = 0;
        }

        return NULL;
}

static int parse_arg(struct task_params *param,
                     const char *text,
                     struct cpu_tasks *cpus)
{
        struct t2f {
                const char *text;
                unsigned flag;
        };

        static struct t2f type2flags [] = {
                { "rtk",  RTK  },
                { "rtup", RTUP },
                { "rtus", RTUS },
                { "rtuo", RTUO }
        };

        static struct t2f fp2flags [] = {
                { "_fp",   AFP  },
                { "_ufpp", UFPP },
                { "_ufps", UFPS }
        };

        unsigned long cpu;
        char *cpu_end;
        unsigned i;

        param->type = param->fp = 0;
        param->cpu = &cpus[0];

        for(i = 0; i < sizeof(type2flags)/sizeof(struct t2f); i++) {
                size_t len = strlen(type2flags[i].text);

                if(!strncmp(text, type2flags[i].text, len)) {
                        param->type = type2flags[i].flag;
                        text += len;
                        goto fpflags;
                }
        }

        return -1;

  fpflags:
        if (*text == '\0')
                return 0;

        if (isdigit(*text))
                goto cpu_nr;

        for(i = 0; i < sizeof(fp2flags)/sizeof(struct t2f); i++) {
                size_t len = strlen(fp2flags[i].text);

                if(!strncmp(text, fp2flags[i].text, len)) {
                        param->fp |= fp2flags[i].flag;
                        text += len;

                        goto fpflags;
                }
        }

        return -1;

  cpu_nr:
        cpu = strtoul(text, &cpu_end, 0);

        if (*cpu_end != '\0' || (cpu == ULONG_MAX && errno))
                return -1;

        param->cpu = &cpus[cpu];
        return 0;
}

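/*
 * Example: the threadspec "rtuo_ufpp_ufps1" is parsed as type RTUO with
 * fp = UFPP | UFPS, pinned to CPU 1, while a bare "rtk" yields an RTK
 * thread with no fpflags on CPU 0.
 */
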
static int check_arg(const struct task_params *param, struct cpu_tasks *end_cpu)
{
        if (param->cpu > end_cpu - 1)
                return 0;

        switch (param->type) {
        case SLEEPER:
        case SWITCHER:
        case FPU_STRESS:
                break;

        case RTK:
                if (param->fp & UFPS)
                        return 0;
                break;

        case RTUP:
                if (param->fp & (AFP|UFPS))
                        return 0;
                break;

        case RTUS:
                if (param->fp & (AFP|UFPP))
                        return 0;
                break;

        case RTUO:
                if (param->fp & AFP)
                        return 0;
                break;
        default:
                return 0;
        }

        return 1;
}

static int task_create(struct cpu_tasks *cpu,
                       struct task_params *param,
                       pthread_attr_t *rt_attr)
{
        char buffer[64];
        typedef void *thread_routine(void *);
        thread_routine *task_routine [] = {
                [RTUP] = &rtup,
                [RTUS] = &rtus,
                [RTUO] = &rtuo
        };
        int err;

        switch(param->type) {
        case RTK:
                param->swt.flags = (param->fp & AFP ? RTTST_SWTEST_FPU : 0)
                        | (param->fp & UFPP ? RTTST_SWTEST_USE_FPU : 0)
                        | (freeze_on_error ? RTTST_SWTEST_FREEZE : 0);

                err=ioctl(cpu->fd,RTTST_RTIOC_SWTEST_CREATE_KTASK,&param->swt);
                if (err) {
                        perror("ioctl(RTTST_RTIOC_SWTEST_CREATE_KTASK)");
                        return -1;
                }
                break;

        case RTUP:
        case RTUS:
        case RTUO:
        case SLEEPER:
        case SWITCHER:
                param->swt.flags = 0;

                err=ioctl(cpu->fd,RTTST_RTIOC_SWTEST_REGISTER_UTASK,&param->swt);
                if (err) {
                        perror("ioctl(RTTST_RTIOC_SWTEST_REGISTER_UTASK)");
                        return -1;
                }
                break;

        case FPU_STRESS:
                break;

        default:
                fprintf(stderr, "Invalid task type %d. Aborting\n", param->type);
                return EINVAL;
        }

        if (param->type == RTK)
                return 0;

        if (param->type == SLEEPER || param->type == SWITCHER) {
                pthread_attr_t attr;

                pthread_attr_init(&attr);
                pthread_attr_setstacksize(&attr, SMALL_STACK_MIN);

                err = __real_pthread_create(&param->thread,
                                            &attr,
                                            sleeper_switcher,
                                            param);

                pthread_attr_destroy(&attr);

                if (err)
                        fprintf(stderr,"pthread_create: %s\n",strerror(err));

                return err;
        }

        if (param->type == FPU_STRESS) {
                pthread_attr_t attr;

                pthread_attr_init(&attr);
                pthread_attr_setstacksize(&attr, LARGE_STACK_MIN);

                err = __real_pthread_create(&param->thread,
                                            &attr,
                                            fpu_stress,
                                            param);

                pthread_attr_destroy(&attr);

                if (err)
                        fprintf(stderr,"pthread_create: %s\n",strerror(err));

                return err;
        }

        err = pthread_create(&param->thread, rt_attr,
                             task_routine[param->type], param);
        if (err) {
                fprintf(stderr, "pthread_create: %s\n", strerror(err));
                return err;
        }

        err = pthread_set_name_np(param->thread,
                                  task_name(buffer, sizeof(buffer),
                                            param->cpu, param->swt.index));

        if (err)
                fprintf(stderr,"pthread_set_name_np: %s\n", strerror(err));

        return err;
}

#define DEV_NR_MAX 256

static int open_rttest(char *buf, size_t size, unsigned count)
{
        static unsigned dev_nr = 0;
        int fd, status;

        do {
                snprintf(buf, size, "/dev/rttest-switchtest%d", dev_nr);

                status = fd = open(buf, O_RDWR);

                if (fd == -1)
                        goto next_dev;

                status = ioctl(fd, RTTST_RTIOC_SWTEST_SET_TASKS_COUNT, count);

                if (status == 0)
                        break;

                if (errno != ENOSYS && errno != ENOTTY) {
                        fprintf(stderr, "switchtest: open: %m\n");
                        return -1;
                }

          next_dev:
                if (fd != -1)
                        close(fd);

                if (++dev_nr != DEV_NR_MAX)
                        continue;

                fprintf(stderr, "switchtest: Unable to open switchtest device.\n"
                        "(modprobe xeno_switchtest ?)\n");

                return -1;
        } while (status == -1);

        return fd;
}

const char *all_nofp [] = {
        "rtk",
        "rtk",
        "rtup",
        "rtup",
        "rtus",
        "rtus",
        "rtuo",
        "rtuo",
};

const char *all_fp [] = {
        "rtk",
        "rtk",
        "rtk_fp",
        "rtk_fp",
        "rtk_fp_ufpp",
        "rtk_fp_ufpp",
        "rtup",
        "rtup",
        "rtup_ufpp",
        "rtup_ufpp",
        "rtus",
        "rtus",
        "rtus_ufps",
        "rtus_ufps",
        "rtuo",
        "rtuo",
        "rtuo_ufpp",
        "rtuo_ufpp",
        "rtuo_ufps",
        "rtuo_ufps",
        "rtuo_ufpp_ufps",
        "rtuo_ufpp_ufps"
};

unsigned long xatoul(const char *str)
{
        unsigned long result;
        char *endptr;

        result = strtoul(str, &endptr, 0);

        if (result == ULONG_MAX && errno == ERANGE) {
                fprintf(stderr, "Overflow while parsing %s\n", str);
                exit(EXIT_FAILURE);
        }

        if (*endptr != '\0') {
                fprintf(stderr, "Error while parsing \"%s\" as a number\n", str);
                exit(EXIT_FAILURE);
        }

        return result;
}

void usage(FILE *fd, const char *progname)
{
        unsigned i, j, nr_cpus;

#if CONFIG_SMP
        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
#else /* !CONFIG_SMP */
        nr_cpus = 1;
#endif /* !CONFIG_SMP */

        fprintf(fd,
                "Usage:\n"
                "%s [options] threadspec threadspec...\n"
                "Create threads of various types and attempt to switch context "
                "between these\nthreads, printing the count of context switches "
                "every second.\n\n"
                "Available options are:\n"
                "--help or -h, causes this program to print this help string and "
                "exit;\n"
                "--lines <lines> or -l <lines>, prints headers every <lines> "
                "lines;\n"
                "--quiet or -q, prevents this program from printing every "
                "second the count of\ncontext switches;\n"
                "--timeout <duration> or -T <duration>, limits the test duration "
                "to <duration>\nseconds;\n"
                "--nofpu or -n, disables any use of FPU instructions;\n"
                "--stress <period> or -s <period>, enables a stress mode where:\n"
                "  context switches occur every <period> us;\n"
                "  a background task uses (and checks) the FPU all the time;\n"
                "--freeze, freezes the trace upon error.\n\n"
                "Each 'threadspec' specifies the characteristics of a "
                "thread to be created:\n"
                "threadspec = (rtk|rtup|rtus|rtuo)(_fp|_ufpp|_ufps)*[0-9]*\n"
                "rtk for a kernel-space real-time thread;\n"
                "rtup for a user-space real-time thread running in primary"
                " mode,\n"
                "rtus for a user-space real-time thread running in secondary"
                " mode,\n"
                "rtuo for a user-space real-time thread oscillating between"
                " primary and\nsecondary mode,\n\n"
                "_fp means that the created thread will have the XNFPU bit"
                " armed (only valid for\nrtk),\n"
                "_ufpp means that the created thread will use the FPU when in "
                "primary mode\n(invalid for rtus),\n"
                "_ufps means that the created thread will use the FPU when in "
                "secondary mode\n(invalid for rtk and rtup),\n\n"
                "[0-9]* specifies the ID of the CPU where the created thread "
                "will run, 0 if\nunspecified.\n\n"
                "Passing no 'threadspec' is equivalent to running:\n%s",
                progname, progname);

        for (i = 0; i < nr_cpus; i++)
                for (j = 0; j < sizeof(all_fp)/sizeof(char *); j++)
                        fprintf(fd, " %s%d", all_fp[j], i);

        fprintf(fd,
                "\n\nPassing only the --nofpu or -n argument is equivalent to "
                "running:\n%s", progname);

        for (i = 0; i < nr_cpus; i++)
                for (j = 0; j < sizeof(all_nofp)/sizeof(char *); j++)
                        fprintf(fd, " %s%d", all_nofp[j], i);
        fprintf(fd, "\n\n");
}

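/*
 * Illustrative invocations built from the threadspec grammar above (CPU
 * numbers and durations are only examples):
 *
 *   switchtest                       use the default all_fp/all_nofp set
 *   switchtest -T 60 rtk0 rtup_ufpp0 one kernel thread plus one primary-mode
 *                                    user thread using the FPU, both on
 *                                    CPU 0, for 60 seconds
 *   switchtest -n rtus1 rtuo1        FPU disabled, both threads on CPU 1
 */
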
sigjmp_buf jump;

void illegal_instruction(int sig)
{
        signal(sig, SIG_DFL);
        siglongjmp(jump, 1);
}

/* We run the FPU check in a thread to avoid clobbering the main thread's FPU
   backup area. This matters on x86, where clobbering it would cause the FPU
   backup areas of all RT threads to be clobbered as well, so their FPU context
   would be switched systematically and the case where the FPU has never been
   used would no longer be tested. */
void *check_fpu_thread(void *cookie)
{
        int check;

        /* Check if fp routines are dummy or if hw fpu is not supported. */
        fprintf(stderr, "== Testing FPU check routines...\n");
        if(sigsetjmp(jump, 1)) {
                fprintf(stderr, "== Hardware FPU not available on your board"
                        " or not enabled in Linux kernel\n== configuration:"
                        " skipping FPU switches tests.\n");
                return NULL;
        }
        signal(SIGILL, illegal_instruction);
        fp_regs_set(1);
        check = fp_regs_check(2);
        signal(SIGILL, SIG_DFL);
        if (check != 1) {
                fprintf(stderr,
                        "== FPU check routines: unimplemented, "
                        "skipping FPU switches tests.\n");
                return NULL;
        }

        fprintf(stderr, "== FPU check routines: OK.\n");
        return (void *) 1;
}

int check_fpu(void)
{
        pthread_t tid;
        void *status;
        int err;

        err = __real_pthread_create(&tid, NULL, check_fpu_thread, NULL);
        if (err) {
                fprintf(stderr, "pthread_create: %s\n", strerror(err));
                exit(EXIT_FAILURE);
        }

        err = pthread_join(tid, &status);
        if (err) {
                fprintf(stderr, "pthread_join: %s\n", strerror(err));
                exit(EXIT_FAILURE);
        }

        return (long) status;
}

int main(int argc, const char *argv[])
{
        unsigned i, j, nr_cpus, use_fp = 1, stress = 0;
        pthread_attr_t rt_attr;
        const char *progname = argv[0];
        struct cpu_tasks *cpus;
        struct sched_param sp;
        char devname[RTDM_MAX_DEVNAME_LEN+1];
        sigset_t mask;
        int sig;

        status = EXIT_SUCCESS;

        /* Initializations. */
        if (mlockall(MCL_CURRENT|MCL_FUTURE)) {
                perror("mlockall");
                exit(EXIT_FAILURE);
        }

        if (__real_sem_init(&sleeper_start, 0, 0)) {
                perror("sem_init");
                exit(EXIT_FAILURE);
        }

#if CONFIG_SMP
        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
#else /* !CONFIG_SMP */
        nr_cpus = 1;
#endif /* !CONFIG_SMP */

        if (nr_cpus == -1) {
                fprintf(stderr,
                        "Error %d while getting the number of cpus (%s)\n",
                        errno,
                        strerror(errno));
                exit(EXIT_FAILURE);
        }

        fp_features_init();

        /* Parse command line options. */
        opterr = 0;
        for (;;) {
                static struct option long_options[] = {
                        { "freeze",  0, NULL, 'f' },
                        { "help",    0, NULL, 'h' },
                        { "lines",   1, NULL, 'l' },
                        { "nofpu",   0, NULL, 'n' },
                        { "quiet",   0, NULL, 'q' },
                        { "stress",  1, NULL, 's' },
                        { "timeout", 1, NULL, 'T' },
                        { NULL,      0, NULL, 0   }
                };
                int i = 0;
                int c = getopt_long(argc, (char *const *) argv, "fhl:nqs:T:",
                                    long_options, &i);

                if (c == -1)
                        break;

                switch(c) {
                case 'f':
                        freeze_on_error = 1;
                        break;

                case 'h':
                        usage(stdout, progname);
                        exit(EXIT_SUCCESS);

                case 'l':
                        data_lines = xatoul(optarg);
                        break;

                case 'n':
                        use_fp = 0;
                        break;

                case 'q':
                        quiet = 1;
                        break;

                case 's':
                        stress = xatoul(optarg);
                        break;

                case 'T':
                        alarm(xatoul(optarg));
                        break;

                case '?':
                        usage(stderr, progname);
                        fprintf(stderr, "%s: Invalid option.\n", argv[optind-1]);
                        exit(EXIT_FAILURE);

                case ':':
                        usage(stderr, progname);
                        fprintf(stderr, "Missing argument of option %s.\n",
                                argv[optind-1]);
                        exit(EXIT_FAILURE);
                }
        }

        if (setvbuf(stdout, NULL, _IOLBF, 0)) {
                perror("setvbuf");
                exit(EXIT_FAILURE);
        }

        /* If no argument was passed (or only -n), replace argc and argv with
           default values, given by all_fp or all_nofp depending on the presence
           of the -n flag. */
        if (optind == argc) {
                const char **all;
                char buffer[32];
                unsigned count;

                if (use_fp)
                        use_fp = check_fpu();

                if (use_fp) {
                        all = all_fp;
                        count = sizeof(all_fp)/sizeof(char *);
                } else {
                        all = all_nofp;
                        count = sizeof(all_nofp)/sizeof(char *);
                }

                argc = count * nr_cpus + 1;
                argv = (const char **) malloc(argc * sizeof(char *));
                argv[0] = progname;
                for (i = 0; i < nr_cpus; i++)
                        for (j = 0; j < count; j++) {
                                snprintf(buffer,
                                         sizeof(buffer),
                                         "%s%d",
                                         all[j],
                                         i);
                                argv[i * count + j + 1] = strdup(buffer);
                        }

                optind = 1;
        }

        cpus = (struct cpu_tasks *) malloc(sizeof(*cpus) * nr_cpus);
        if (!cpus) {
                perror("malloc");
                exit(EXIT_FAILURE);
        }

        for (i = 0; i < nr_cpus; i++) {
                size_t size;
                cpus[i].fd = -1;
                cpus[i].index = i;
                cpus[i].capacity = 2;
                size = cpus[i].capacity * sizeof(struct task_params);
                cpus[i].tasks_count = 1;
                cpus[i].tasks = (struct task_params *) malloc(size);
                cpus[i].last_switches_count = 0;

                if (!cpus[i].tasks) {
                        perror("malloc");
                        exit(EXIT_FAILURE);
                }

                cpus[i].tasks[0].type = stress ? SWITCHER : SLEEPER;
                cpus[i].tasks[0].fp = use_fp ? UFPS : 0;
                cpus[i].tasks[0].cpu = &cpus[i];
                cpus[i].tasks[0].thread = 0;
                cpus[i].tasks[0].swt.index = cpus[i].tasks[0].swt.flags = 0;
        }

        /* Parse arguments and build data structures. */
        for(i = optind; i < argc; i++) {
                struct task_params params;
                struct cpu_tasks *cpu;

                if(parse_arg(&params, argv[i], cpus)) {
                        usage(stderr, progname);
                        fprintf(stderr, "Unable to parse %s as a thread type. "
                                "Aborting.\n", argv[i]);
                        exit(EXIT_FAILURE);
                }

                if (!check_arg(&params, &cpus[nr_cpus])) {
                        usage(stderr, progname);
                        fprintf(stderr,
                                "Invalid parameters %s. Aborting\n",
                                argv[i]);
                        exit(EXIT_FAILURE);
                }

                if (!use_fp && params.fp) {
                        usage(stderr, progname);
                        fprintf(stderr,
                                "%s is invalid because FPU is disabled"
                                " (option -n passed).\n", argv[i]);
                        exit(EXIT_FAILURE);
                }

                cpu = params.cpu;
                if(++cpu->tasks_count > cpu->capacity) {
                        size_t size;
                        cpu->capacity += cpu->capacity / 2;
                        size = cpu->capacity * sizeof(struct task_params);
                        cpu->tasks =
                                (struct task_params *) realloc(cpu->tasks, size);
                        if (!cpu->tasks) {
                                perror("realloc");
                                exit(EXIT_FAILURE);
                        }
                }

                params.thread = 0;
                params.swt.index = params.swt.flags = 0;
                cpu->tasks[cpu->tasks_count - 1] = params;
        }

        if (stress)
                for (i = 0; i < nr_cpus; i++) {
                        struct task_params params;
                        struct cpu_tasks *cpu = &cpus[i];

                        if (cpu->tasks_count + 1 > cpu->capacity) {
                                size_t size;
                                cpu->capacity += cpu->capacity / 2;
                                size = cpu->capacity * sizeof(struct task_params);
                                cpu->tasks =
                                        (struct task_params *) realloc(cpu->tasks, size);
                                if (!cpu->tasks) {
                                        perror("realloc");
                                        exit(EXIT_FAILURE);
                                }
                        }

                        params.type = FPU_STRESS;
                        params.fp = UFPS;
                        params.cpu = cpu;
                        params.thread = 0;
                        params.swt.index = cpu->tasks_count;
                        params.swt.flags = 0;
                        cpu->tasks[cpu->tasks_count] = params;
                }

        /* For best compatibility with both LinuxThreads and NPTL, block the
           termination signals on all threads. */
        sigemptyset(&mask);
        sigaddset(&mask, SIGINT);
        sigaddset(&mask, SIGTERM);
        sigaddset(&mask, SIGALRM);
        pthread_sigmask(SIG_BLOCK, &mask, NULL);

        __real_pthread_mutex_init(&headers_lock, NULL);

        /* Prepare attributes for real-time tasks. */
        pthread_attr_init(&rt_attr);
        pthread_attr_setinheritsched(&rt_attr, PTHREAD_EXPLICIT_SCHED);
        pthread_attr_setschedpolicy(&rt_attr, SCHED_FIFO);
        sp.sched_priority = 1;
        pthread_attr_setschedparam(&rt_attr, &sp);
        pthread_attr_setstacksize(&rt_attr, SMALL_STACK_MIN);

        printf("== Threads:");
        /* Create and register all tasks. */
        for (i = 0; i < nr_cpus; i ++) {
                struct cpu_tasks *cpu = &cpus[i];
                char buffer[64];

                cpu->fd = open_rttest(devname, sizeof(devname), cpu->tasks_count);

                if (cpu->fd == -1)
                        goto failure;

                if (ioctl(cpu->fd, RTTST_RTIOC_SWTEST_SET_CPU, i)) {
                        perror("ioctl(RTTST_RTIOC_SWTEST_SET_CPU)");
                        goto failure;
                }

                if (stress &&
                    ioctl(cpu->fd, RTTST_RTIOC_SWTEST_SET_PAUSE, stress)) {
                        perror("ioctl(RTTST_RTIOC_SWTEST_SET_PAUSE)");
                        goto failure;
                }

                for (j = 0; j < cpu->tasks_count + !!stress; j++) {
                        struct task_params *param = &cpu->tasks[j];
                        if (task_create(cpu, param, &rt_attr)) {
                          failure:
                                status = EXIT_FAILURE;
                                goto cleanup;
                        }
                        printf(" %s",
                               task_name(buffer, sizeof(buffer),
                                         param->cpu, param->swt.index));
                }
        }
        printf("\n");

        clock_gettime(CLOCK_REALTIME, &start);

        /* Start the sleeper tasks. */
        for (i = 0; i < nr_cpus; i ++)
                __real_sem_post(&sleeper_start);

        /* Wait for interruption. */
        sigwait(&mask, &sig);

        /* Allow a second Ctrl-C in case of lockup. */
        pthread_sigmask(SIG_UNBLOCK, &mask, NULL);

        /* Cleanup. */
  cleanup:
        for (i = 0; i < nr_cpus; i ++) {
                struct cpu_tasks *cpu = &cpus[i];

                /* kill the user-space tasks. */
                for (j = 0; j < cpu->tasks_count + !!stress; j++) {
                        struct task_params *param = &cpu->tasks[j];

                        if (param->type != RTK && param->thread)
                                pthread_cancel(param->thread);
                }
        }

        for (i = 0; i < nr_cpus; i ++) {
                struct cpu_tasks *cpu = &cpus[i];

                /* join the user-space tasks. */
                for (j = 0; j < cpu->tasks_count + !!stress; j++) {
                        struct task_params *param = &cpu->tasks[j];

                        if (param->type != RTK && param->thread)
                                pthread_join(param->thread, NULL);
                }

                if (cpus[i].fd != -1) {
                        struct timespec now;

                        clock_gettime(CLOCK_REALTIME, &now);

                        quiet = 0;
                        display_switches_count(&cpus[i], &now);

                        /* Kill the kernel-space tasks. */
                        close(cpus[i].fd);
                }
                free(cpu->tasks);
        }
        free(cpus);
        __real_sem_destroy(&sleeper_start);
        __real_pthread_mutex_destroy(&headers_lock);

        return status;
}