[IPV4]: Correct rp_filter help text.
[linux-2.6/verdex.git] / arch / frv / kernel / semaphore.c
blob8e182ced1a0f028d1020e5f44df4da8e5512722c
/* semaphore.c: FR-V semaphores
 *
 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * - Derived from lib/rwsem-spinlock.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/module.h>
#include <asm/semaphore.h>
/* Record of one task sleeping for a semaphore token.  The structure lives
 * on the sleeper's kernel stack (see the comment in __up()) and is queued
 * on the semaphore's wait_list while the task waits.
 */
struct sem_waiter {
	struct list_head list;		/* link in semaphore->wait_list */
	struct task_struct *task;	/* the sleeping task; set to NULL by __up()
					 * once the token has been handed over */
};
#ifdef CONFIG_DEBUG_SEMAPHORE
/* Debug trace helper: for a semaphore marked with sem->debug, log the
 * caller's tag plus the current counter and a 0/1 flag saying whether any
 * task is queued on the wait list.  Compiles away entirely when
 * CONFIG_DEBUG_SEMAPHORE is not set.
 */
void semtrace(struct semaphore *sem, const char *str)
{
	int waiters_queued;

	if (!sem->debug)
		return;

	waiters_queued = list_empty(&sem->wait_list) ? 0 : 1;
	printk("[%d] %s({%d,%d})\n",
	       current->pid, str, sem->counter, waiters_queued);
}
#else
#define semtrace(SEM,STR) do { } while(0)
#endif
/*
 * wait for a token to be granted from a semaphore
 * - entered with lock held and interrupts disabled
 */
void __down(struct semaphore *sem, unsigned long flags)
{
	struct task_struct *tsk = current;
	struct sem_waiter waiter;

	semtrace(sem, "Entering __down");

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	get_task_struct(tsk);	/* ref dropped by put_task_struct() in __up() */

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the semaphore */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	for (;;) {
		/* __up() unlinks the waiter (list_del_init) before waking us,
		 * so an empty link means the token is ours */
		if (list_empty(&waiter.list))
			break;
		schedule();
		/* the state must be reset BEFORE re-testing the list so a
		 * wakeup between the two cannot be lost */
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
	semtrace(sem, "Leaving __down");
}

EXPORT_SYMBOL(__down);
/*
 * interruptibly wait for a token to be granted from a semaphore
 * - entered with lock held and interrupts disabled
 * - returns 0 on success, -EINTR if interrupted by a signal before the
 *   token was granted
 */
int __down_interruptible(struct semaphore *sem, unsigned long flags)
{
	struct task_struct *tsk = current;
	struct sem_waiter waiter;
	int ret;

	semtrace(sem,"Entering __down_interruptible");

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	get_task_struct(tsk);	/* ref dropped by __up(), or below on -EINTR */

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	set_task_state(tsk, TASK_INTERRUPTIBLE);

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the semaphore */
	ret = 0;
	for (;;) {
		/* __up() unlinks the waiter before waking; empty == granted */
		if (list_empty(&waiter.list))
			break;
		if (unlikely(signal_pending(current)))
			goto interrupted;
		schedule();
		/* reset state before re-testing so a wakeup cannot be lost */
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}

 out:
	tsk->state = TASK_RUNNING;
	semtrace(sem, "Leaving __down_interruptible");
	return ret;

 interrupted:
	/* retake the lock to see whether __up() granted us the token in the
	 * window before we could dequeue ourselves */
	spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&waiter.list)) {
		/* still queued: no token was granted, so withdraw and fail */
		list_del(&waiter.list);
		ret = -EINTR;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);
	/* __up() only drops the task ref when it grants the token; if we
	 * backed out ourselves we must drop it here */
	if (ret == -EINTR)
		put_task_struct(current);
	goto out;
}

EXPORT_SYMBOL(__down_interruptible);
/*
 * release a single token back to a semaphore
 * - entered with lock held and interrupts disabled
 */
void __up(struct semaphore *sem)
{
	struct task_struct *tsk;
	struct sem_waiter *waiter;

	semtrace(sem,"Entering __up");

	/* grant the token to the process at the front of the queue */
	waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);

	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
	 * It is allocated on the waiter's stack and may become invalid at
	 * any time after that point (due to a wakeup from another source).
	 */
	list_del_init(&waiter->list);
	tsk = waiter->task;
	mb();			/* order the unlink/->task read vs. the NULL store */
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);	/* drop the ref taken in __down*() */

	semtrace(sem,"Leaving __up");
}

EXPORT_SYMBOL(__up);