kernel/rtmutex-tester.c
/*
 * RT-Mutex-tester: scriptable tester for rt mutexes
 *
 * started by Thomas Gleixner:
 *
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 */
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
#include <linux/timer.h>

#include "rtmutex.h"
#define MAX_RT_TEST_THREADS	8
#define MAX_RT_TEST_MUTEXES	8

static spinlock_t rttest_lock;
static atomic_t rttest_event;
struct test_thread_data {
	int			opcode;
	int			opdata;
	int			mutexes[MAX_RT_TEST_MUTEXES];
	int			bkl;
	int			event;
	struct sys_device	sysdev;
};

static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
static struct task_struct *threads[MAX_RT_TEST_THREADS];
static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
enum test_opcodes {
	RTTEST_NOP = 0,
	RTTEST_SCHEDOT,		/* 1 Sched other, data = nice */
	RTTEST_SCHEDRT,		/* 2 Sched fifo, data = prio */
	RTTEST_LOCK,		/* 3 Lock uninterruptible, data = lockindex */
	RTTEST_LOCKNOWAIT,	/* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKINT,		/* 5 Lock interruptible, data = lockindex */
	RTTEST_LOCKINTNOWAIT,	/* 6 Lock interruptible no wait in wakeup, data = lockindex */
	RTTEST_LOCKCONT,	/* 7 Continue locking after the wakeup delay */
	RTTEST_UNLOCK,		/* 8 Unlock, data = lockindex */
	RTTEST_LOCKBKL,		/* 9 Lock BKL */
	RTTEST_UNLOCKBKL,	/* 10 Unlock BKL */
	RTTEST_SIGNAL,		/* 11 Signal other test thread, data = thread id */
	RTTEST_RESETEVENT = 98,	/* 98 Reset event counter */
	RTTEST_RESET = 99,	/* 99 Reset all pending operations */
};
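
/*
 * Per-mutex state tracked in test_thread_data->mutexes[] (the BKL uses
 * the same values in ->bkl). The numbers below are how the code uses
 * them; the descriptions are editorial:
 *
 *   0: unlocked / idle
 *   1: lock operation requested (set right before rt_mutex_lock*())
 *   2: blocked on the lock inside schedule_rt_mutex_test()
 *   3: wakeup seen, waiting for RTTEST_LOCKCONT to continue
 *   4: lock held
 */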
/*
 * Execute one test opcode on behalf of a test thread. @lockwakeup is
 * nonzero when we are called from inside schedule_rt_mutex_test() while
 * single-stepping a lock operation; in that case only the control
 * opcodes handled by the first switch are accepted.
 */
static int handle_op(struct test_thread_data *td, int lockwakeup)
{
	int i, id, ret = -EINVAL;

	switch(td->opcode) {

	case RTTEST_NOP:
		return 0;

	case RTTEST_LOCKCONT:
		td->mutexes[td->opdata] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		return 0;

	case RTTEST_RESET:
		for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
			if (td->mutexes[i] == 4) {
				rt_mutex_unlock(&mutexes[i]);
				td->mutexes[i] = 0;
			}
		}

		if (!lockwakeup && td->bkl == 4) {
			unlock_kernel();
			td->bkl = 0;
		}
		return 0;

	case RTTEST_RESETEVENT:
		atomic_set(&rttest_event, 0);
		return 0;

	default:
		if (lockwakeup)
			return ret;
	}
	switch(td->opcode) {

	case RTTEST_LOCK:
	case RTTEST_LOCKNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_lock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 4;
		return 0;

	case RTTEST_LOCKINT:
	case RTTEST_LOCKINTNOWAIT:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
			return ret;

		td->mutexes[id] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = ret ? 0 : 4;
		return ret ? -EINTR : 0;

	case RTTEST_UNLOCK:
		id = td->opdata;
		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
			return ret;

		td->event = atomic_add_return(1, &rttest_event);
		rt_mutex_unlock(&mutexes[id]);
		td->event = atomic_add_return(1, &rttest_event);
		td->mutexes[id] = 0;
		return 0;

	case RTTEST_LOCKBKL:
		if (td->bkl)
			return 0;
		td->bkl = 1;
		lock_kernel();
		td->bkl = 4;
		return 0;

	case RTTEST_UNLOCKBKL:
		if (td->bkl != 4)
			break;
		unlock_kernel();
		td->bkl = 0;
		return 0;

	default:
		break;
	}
	return ret;
}
/*
 * Schedule replacement for rtsem_down(). Only called for threads with
 * PF_MUTEX_TESTER set.
 *
 * This allows us to have fine-grained control over the event flow.
 */
void schedule_rt_mutex_test(struct rt_mutex *mutex)
{
	int tid, op, dat;
	struct test_thread_data *td;

	/* We have to look up the task */
	for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
		if (threads[tid] == current)
			break;
	}

	BUG_ON(tid == MAX_RT_TEST_THREADS);

	td = &thread_data[tid];

	op = td->opcode;
	dat = td->opdata;

	/* Before blocking: mark the mutex as contended (state 1 -> 2) */
	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			break;

		if (td->mutexes[dat] != 1)
			break;

		td->mutexes[dat] = 2;
		td->event = atomic_add_return(1, &rttest_event);
		break;

	case RTTEST_LOCKBKL:
	default:
		break;
	}

	schedule();
	/*
	 * After the wakeup: LOCK/LOCKINT acknowledge it (state 2 -> 3) and
	 * fall through to wait for RTTEST_LOCKCONT below; the NOWAIT
	 * variants rearm the lock operation (state 2 -> 1) and return
	 * immediately.
	 */
	switch (op) {
	case RTTEST_LOCK:
	case RTTEST_LOCKINT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 3;
		td->event = atomic_add_return(1, &rttest_event);
		break;

	case RTTEST_LOCKNOWAIT:
	case RTTEST_LOCKINTNOWAIT:
		if (mutex != &mutexes[dat])
			return;

		if (td->mutexes[dat] != 2)
			return;

		td->mutexes[dat] = 1;
		td->event = atomic_add_return(1, &rttest_event);
		return;

	case RTTEST_LOCKBKL:
		return;
	default:
		return;
	}

	td->opcode = 0;
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			int ret;

			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 1);
			set_current_state(TASK_INTERRUPTIBLE);
			if (td->opcode == RTTEST_LOCKCONT)
				break;
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
	}

	/* Restore previous command and data */
	td->opcode = op;
	td->opdata = dat;
}
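
/*
 * Putting the pieces together, a single-stepped RTTEST_LOCK on mutex 0
 * proceeds as follows (state values as documented above):
 *
 *   1. The controller writes "3:0" to the thread's command file; the
 *      thread sets mutexes[0] = 1 and calls rt_mutex_lock().
 *   2. If the lock is contended, schedule_rt_mutex_test() marks the
 *      mutex as blocked (state 2) and schedules away.
 *   3. On wakeup the state advances to 3 and the thread waits until the
 *      controller issues RTTEST_LOCKCONT ("7:0"), which resets the
 *      state to 1 and lets the lock operation continue; once the lock
 *      is finally acquired, handle_op() records state 4.
 */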
static int test_func(void *data)
{
	struct test_thread_data *td = data;
	int ret;

	current->flags |= PF_MUTEX_TESTER;
	allow_signal(SIGHUP);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (td->opcode > 0) {
			set_current_state(TASK_RUNNING);
			ret = handle_op(td, 0);
			set_current_state(TASK_INTERRUPTIBLE);
			td->opcode = ret;
		}

		/* Wait for the next command to be executed */
		schedule();
		try_to_freeze();

		if (signal_pending(current))
			flush_signals(current);

		if (kthread_should_stop())
			break;
	}
	return 0;
}
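
/*
 * Example usage from user space (a sketch; the exact sysfs location is
 * an assumption derived from the sysdev class name "rttest" registered
 * below, i.e. /sys/devices/system/rttest/rttest<N>/):
 *
 *   # make thread 0 acquire mutex 0 (RTTEST_LOCK = 3)
 *   echo "3:0" > /sys/devices/system/rttest/rttest0/command
 *   # inspect thread 0
 *   cat /sys/devices/system/rttest/rttest0/status
 *   # release mutex 0 again (RTTEST_UNLOCK = 8)
 *   echo "8:0" > /sys/devices/system/rttest/rttest0/command
 */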
/**
 * sysfs_test_command - interface for test commands
 * @dev:	thread reference
 * @buf:	command for actual step
 * @count:	length of buffer
 *
 * command syntax:
 *
 *   opcode:data
 */
static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
				  size_t count)
{
	struct sched_param schedpar;
	struct test_thread_data *td;
	char cmdbuf[32];
	int op, dat, tid, ret;

	td = container_of(dev, struct test_thread_data, sysdev);
	tid = td->sysdev.id;

	/* strings from a sysfs write are not 0 terminated! */
	if (count >= sizeof(cmdbuf))
		return -EINVAL;

	/* strip off a trailing \n: */
	if (buf[count-1] == '\n')
		count--;
	if (count < 1)
		return -EINVAL;

	memcpy(cmdbuf, buf, count);
	cmdbuf[count] = 0;

	if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
		return -EINVAL;
	switch (op) {
	case RTTEST_SCHEDOT:
		schedpar.sched_priority = 0;
		ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
		if (ret)
			return ret;
		set_user_nice(current, 0);
		break;

	case RTTEST_SCHEDRT:
		schedpar.sched_priority = dat;
		ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
		if (ret)
			return ret;
		break;

	case RTTEST_SIGNAL:
		send_sig(SIGHUP, threads[tid], 0);
		break;

	default:
		if (td->opcode > 0)
			return -EBUSY;
		td->opdata = dat;
		td->opcode = op;
		wake_up_process(threads[tid]);
	}

	return count;
}
/**
 * sysfs_test_status - sysfs interface for rt tester
 * @dev:	thread to query
 * @buf:	char buffer to be filled with thread status info
 */
static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)
{
	struct test_thread_data *td;
	struct task_struct *tsk;
	char *curr = buf;
	int i;

	td = container_of(dev, struct test_thread_data, sysdev);
	tsk = threads[td->sysdev.id];

	spin_lock(&rttest_lock);

	curr += sprintf(curr,
		"O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:",
		td->opcode, td->event, tsk->state,
		(MAX_RT_PRIO - 1) - tsk->prio,
		(MAX_RT_PRIO - 1) - tsk->normal_prio,
		tsk->pi_blocked_on, td->bkl);

	for (i = MAX_RT_TEST_MUTEXES - 1; i >= 0; i--)
		curr += sprintf(curr, "%d", td->mutexes[i]);

	spin_unlock(&rttest_lock);

	curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
			mutexes[td->sysdev.id].owner);

	return curr - buf;
}
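
/*
 * Key to the status line (derived from the format string above):
 * O = pending opcode, E = event counter, S = task state, P and N =
 * current and normal priority (both converted to the rt priority
 * scale), B = the rt_mutex_waiter the task is blocked on, K = BKL
 * state, M = per-mutex states (highest index first), T = task pointer,
 * R = owner of the mutex with this thread's index.
 */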
static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);

static struct sysdev_class rttest_sysclass = {
	set_kset_name("rttest"),
};
static int init_test_thread(int id)
{
	thread_data[id].sysdev.cls = &rttest_sysclass;
	thread_data[id].sysdev.id = id;

	threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
	if (IS_ERR(threads[id]))
		return PTR_ERR(threads[id]);

	return sysdev_register(&thread_data[id].sysdev);
}
static int init_rttest(void)
{
	int ret, i;

	spin_lock_init(&rttest_lock);

	for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
		rt_mutex_init(&mutexes[i]);

	ret = sysdev_class_register(&rttest_sysclass);
	if (ret)
		return ret;

	for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
		ret = init_test_thread(i);
		if (ret)
			break;
		ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
		if (ret)
			break;
		ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
		if (ret)
			break;
	}

	printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK");

	return ret;
}

device_initcall(init_rttest);
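
/*
 * Build note (an assumption, not taken from this file): the tester is
 * typically enabled via the CONFIG_RT_MUTEX_TESTER debug option and is
 * built into the kernel; device_initcall() above registers it at boot
 * and there is no corresponding teardown path.
 */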