#ifndef _ASM_PARISC_FUTEX_H
#define _ASM_PARISC_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/errno.h>

/* The following has to match the LWS code in syscall.S.  We have
   sixteen four-word locks. */
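/*
 * Lock selection (see _futex_spin_lock_irqsave() below): bits 7-4 of the
 * futex's user address pick one of the sixteen locks, and the result is
 * scaled by four because each lock occupies four words in
 * lws_lock_start[].  As a purely illustrative example, a futex at user
 * address 0x4a70 hashes to (0x70 >> 2) = 28, i.e. lws_lock_start[28],
 * the eighth lock.
 */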
static inline void
_futex_spin_lock_irqsave(u32 __user *uaddr, unsigned long int *flags)
{
	extern u32 lws_lock_start[];
	long index = ((long)uaddr & 0xf0) >> 2;
	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
	local_irq_save(*flags);
	arch_spin_lock(s);
}
static inline void
_futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
{
	extern u32 lws_lock_start[];
	long index = ((long)uaddr & 0xf0) >> 2;
	arch_spinlock_t *s = (arch_spinlock_t *)&lws_lock_start[index];
	arch_spin_unlock(s);
	local_irq_restore(*flags);
}
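/*
 * The two helpers hash uaddr to the same lws_lock_start[] slot, so they
 * must always be used in matched pairs on the same address.  A minimal
 * (hypothetical) caller looks like:
 *
 *	unsigned long flags;
 *
 *	_futex_spin_lock_irqsave(uaddr, &flags);
 *	... access the user word at uaddr ...
 *	_futex_spin_unlock_irqrestore(uaddr, &flags);
 */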
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
	unsigned long int flags;
	u32 val;
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;
	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;
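	/*
	 * encoded_op layout, as decoded above: bits 31-28 hold the
	 * operation (bit 31 being the FUTEX_OP_OPARG_SHIFT flag), bits
	 * 27-24 the comparison, bits 23-12 the operand and bits 11-0 the
	 * comparison argument; the shift pairs sign-extend the two 12-bit
	 * fields.  With FUTEX_OP_OPARG_SHIFT set, oparg is a shift count
	 * rather than a literal operand.
	 */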
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
		return -EFAULT;
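	/*
	 * The get_user()/put_user() calls below may fault.  With the
	 * per-address lock held and interrupts off we cannot sleep to
	 * service a fault, so page faults are disabled and a faulting
	 * access simply fails with -EFAULT.
	 */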
	pagefault_disable();

	_futex_spin_lock_irqsave(uaddr, &flags);

	switch (op) {
	case FUTEX_OP_SET:
		/* *(int *)UADDR2 = OPARG; */
		ret = get_user(oldval, uaddr);
		if (!ret)
			ret = put_user(oparg, uaddr);
		break;
	case FUTEX_OP_ADD:
		/* *(int *)UADDR2 += OPARG; */
		ret = get_user(oldval, uaddr);
		if (!ret) {
			val = oldval + oparg;
			ret = put_user(val, uaddr);
		}
		break;
	case FUTEX_OP_OR:
		/* *(int *)UADDR2 |= OPARG; */
		ret = get_user(oldval, uaddr);
		if (!ret) {
			val = oldval | oparg;
			ret = put_user(val, uaddr);
		}
		break;
	case FUTEX_OP_ANDN:
		/* *(int *)UADDR2 &= ~OPARG; */
		ret = get_user(oldval, uaddr);
		if (!ret) {
			val = oldval & ~oparg;
			ret = put_user(val, uaddr);
		}
		break;
	case FUTEX_OP_XOR:
		/* *(int *)UADDR2 ^= OPARG; */
		ret = get_user(oldval, uaddr);
		if (!ret) {
			val = oldval ^ oparg;
			ret = put_user(val, uaddr);
		}
		break;
	default:
		ret = -ENOSYS;
	}
	_futex_spin_unlock_irqrestore(uaddr, &flags);

	pagefault_enable();
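	/*
	 * On success ret is replaced by the boolean outcome of the
	 * requested comparison against the value that was read; a fault
	 * above leaves ret as a negative errno instead.
	 */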
	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
		default: ret = -ENOSYS;
		}
	}

	return ret;
}
/* Non-atomic version */
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
			      u32 oldval, u32 newval)
{
	int ret;
	u32 val;
	unsigned long flags;
	/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
	 * our gateway page, and causes no end of trouble...
	 */
	if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;
	/* HPPA has no cmpxchg in hardware and therefore the
	 * best we can do here is use an array of locks. The
	 * lock selected is based on a hash of the userspace
	 * address. This should scale to a couple of CPUs.
	 */
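	/*
	 * Under that lock the sequence below behaves like a cmpxchg:
	 * newval is stored only if the word still holds oldval, and the
	 * value actually observed is always reported back through *uval
	 * so the generic futex code can detect a lost race.
	 */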
	_futex_spin_lock_irqsave(uaddr, &flags);
	ret = get_user(val, uaddr);

	if (!ret && val == oldval)
		ret = put_user(newval, uaddr);

	*uval = val;

	_futex_spin_unlock_irqrestore(uaddr, &flags);

	return ret;
}
#endif /*__KERNEL__*/
#endif /*_ASM_PARISC_FUTEX_H*/