#ifndef _ASM_PARISC_FUTEX_H
#define _ASM_PARISC_FUTEX_H

#ifdef __KERNEL__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/errno.h>
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{
        unsigned long int flags;
        u32 val;
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
        int oparg = (encoded_op << 8) >> 20;
        int cmparg = (encoded_op << 20) >> 20;
        int oldval = 0, ret;
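        /* encoded_op is packed by the FUTEX_OP() macro in <linux/futex.h>:
         * op in bits 31-28, cmp in bits 27-24, oparg in bits 23-12 and
         * cmparg in bits 11-0; the shift-left/shift-right pairs above
         * sign-extend the two 12-bit argument fields. */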
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
                return -EFAULT;

        pagefault_disable();
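        /* HPPA has no atomic read-modify-write in hardware, so the
         * operation below is serialized by a spinlock picked by hashing
         * uaddr; see the comment in futex_atomic_cmpxchg_inatomic(). */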
        _atomic_spin_lock_irqsave(uaddr, flags);

        switch (op) {
        case FUTEX_OP_SET:
                /* *(int *)UADDR2 = OPARG; */
                ret = get_user(oldval, uaddr);
                if (!ret)
                        ret = put_user(oparg, uaddr);
                break;
        case FUTEX_OP_ADD:
                /* *(int *)UADDR2 += OPARG; */
                ret = get_user(oldval, uaddr);
                if (!ret) {
                        val = oldval + oparg;
                        ret = put_user(val, uaddr);
                }
                break;
        case FUTEX_OP_OR:
                /* *(int *)UADDR2 |= OPARG; */
                ret = get_user(oldval, uaddr);
                if (!ret) {
                        val = oldval | oparg;
                        ret = put_user(val, uaddr);
                }
                break;
        case FUTEX_OP_ANDN:
                /* *(int *)UADDR2 &= ~OPARG; */
                ret = get_user(oldval, uaddr);
                if (!ret) {
                        val = oldval & ~oparg;
                        ret = put_user(val, uaddr);
                }
                break;
        case FUTEX_OP_XOR:
                /* *(int *)UADDR2 ^= OPARG; */
                ret = get_user(oldval, uaddr);
                if (!ret) {
                        val = oldval ^ oparg;
                        ret = put_user(val, uaddr);
                }
                break;
        default:
                ret = -ENOSYS;
        }
        _atomic_spin_unlock_irqrestore(uaddr, flags);

        pagefault_enable();

        if (!ret) {
                switch (cmp) {
                case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
                case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
                case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
                case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
                case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
                case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
                default: ret = -ENOSYS;
                }
        }

        return ret;
}
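/* For example, a FUTEX_WAKE_OP word built as
 * FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0) makes the function above
 * store 1 at uaddr and return 1 if the old value was 0, 0 if it was not,
 * or a negative error code if the access faulted. */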
/* Non-atomic version */
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                              u32 oldval, u32 newval)
{
        int ret;
        u32 val;
        unsigned long flags;
        /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
         * our gateway page, and causes no end of trouble...
         */
        if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
                return -EFAULT;
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
        /* HPPA has no cmpxchg in hardware and therefore the
         * best we can do here is use an array of locks. The
         * lock selected is based on a hash of the userspace
         * address. This should scale to a couple of CPUs.
         */
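        /* Sketch of that hashing, assuming the parisc <asm/atomic.h> of
         * this era (the exact constants may differ):
         *
         *   extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
         *   #define ATOMIC_HASH(a) \
         *     (&__atomic_hash[(((unsigned long)(a)) / L1_CACHE_BYTES) & \
         *                     (ATOMIC_HASH_SIZE - 1)])
         *
         * so two futex words on different cachelines usually take
         * different locks. */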
        _atomic_spin_lock_irqsave(uaddr, flags);
        ret = get_user(val, uaddr);
        if (!ret && val == oldval)
                ret = put_user(newval, uaddr);

        *uval = val;
        _atomic_spin_unlock_irqrestore(uaddr, flags);

        return ret;
}
#endif /*__KERNEL__*/
#endif /*_ASM_PARISC_FUTEX_H*/