/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */
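/*
 * Illustration of assumption 1 (an explanatory sketch, not part of the
 * original header): atomic_t is typedef struct { int counter; }, so a
 * plain int and an atomic_t have identical layout, and either view of
 * the same word may be used, e.g.:
 *
 *	int x = 0;
 *	atomic_add(5, (atomic_t *)&x);	// x is now 5
 */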
#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H

#ifndef __ASSEMBLY__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
extern struct __get_user futex_set(u32 __user *v, int i);
extern struct __get_user futex_add(u32 __user *v, int n);
extern struct __get_user futex_or(u32 __user *v, int n);
extern struct __get_user futex_andn(u32 __user *v, int n);
extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
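/*
 * Usage sketch for the helpers above (explanatory, not part of the
 * original header): each returns a struct __get_user carrying both the
 * previous value of the word (.val) and a fault indication (.err), e.g.:
 *
 *	struct __get_user r = futex_add(uaddr, 1);
 *	if (r.err)
 *		return r.err;	// -EFAULT on a bad user address
 *	oldval = r.val;		// value at *uaddr before the add
 */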
#ifndef __tilegx__
/* tilepro uses the locking assembly helper for xor. */
extern struct __get_user futex_xor(u32 __user *v, int n);
#else
/* tilegx synthesizes xor from a cmpxchg retry loop. */
static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
{
	struct __get_user asm_ret = __get_user_4(uaddr);
	if (!asm_ret.err) {
		int oldval, newval;
		do {
			oldval = asm_ret.val;
			newval = oldval ^ n;
			asm_ret = futex_cmpxchg(uaddr, oldval, newval);
		} while (asm_ret.err == 0 && oldval != asm_ret.val);
	}
	return asm_ret;
}
#endif
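/*
 * Note on the cmpxchg loop above (explanatory, not part of the original
 * header): futex_cmpxchg() returns the value it actually found at
 * *uaddr in .val. If that differs from the oldval we computed newval
 * from, another thread updated the word between our read and the
 * cmpxchg, so the loop recomputes from the fresh value and retries.
 * This is the classic compare-and-swap idiom:
 *
 *	do {
 *		old = *addr;		// observe
 *		new = old ^ n;		// compute
 *	} while (cmpxchg(addr, old, new) != old);	// publish or retry
 */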
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int ret;
	struct __get_user asm_ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	pagefault_disable();
	switch (op) {
	case FUTEX_OP_SET:
		asm_ret = futex_set(uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		asm_ret = futex_add(uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		asm_ret = futex_or(uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		asm_ret = futex_andn(uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		asm_ret = futex_xor(uaddr, oparg);
		break;
	default:
		asm_ret.err = -ENOSYS;
	}
	pagefault_enable();

	ret = asm_ret.err;

	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:
			ret = (asm_ret.val == cmparg);
			break;
		case FUTEX_OP_CMP_NE:
			ret = (asm_ret.val != cmparg);
			break;
		case FUTEX_OP_CMP_LT:
			ret = (asm_ret.val < cmparg);
			break;
		case FUTEX_OP_CMP_GE:
			ret = (asm_ret.val >= cmparg);
			break;
		case FUTEX_OP_CMP_LE:
			ret = (asm_ret.val <= cmparg);
			break;
		case FUTEX_OP_CMP_GT:
			ret = (asm_ret.val > cmparg);
			break;
		default:
			ret = -ENOSYS;
		}
	}
	return ret;
}
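/*
 * Worked example of the encoded_op decoding above (a sketch, not part
 * of the original header): FUTEX_OP(op, oparg, cmp, cmparg) packs op
 * into bits 28-31, cmp into bits 24-27, oparg into bits 12-23, and
 * cmparg into bits 0-11. Shift pairs like (encoded_op << 8) >> 20
 * sign-extend the 12-bit immediates. For
 * encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0):
 *
 *	op     = (encoded_op >> 28) & 7;	// FUTEX_OP_ADD (1)
 *	cmp    = (encoded_op >> 24) & 15;	// FUTEX_OP_CMP_EQ (0)
 *	oparg  = (encoded_op << 8) >> 20;	// 1
 *	cmparg = (encoded_op << 20) >> 20;	// 0
 */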
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
						u32 oldval, u32 newval)
{
	struct __get_user asm_ret;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	asm_ret = futex_cmpxchg(uaddr, oldval, newval);
	*uval = asm_ret.val;
	return asm_ret.err;
}
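/*
 * Caller's view (a sketch, not part of the original header): on
 * success the function returns 0 and *uval holds the value that was
 * at *uaddr, so futex_wait-style code can tell whether the swap
 * actually happened:
 *
 *	u32 cur;
 *	if (futex_atomic_cmpxchg_inatomic(&cur, uaddr, old, new))
 *		return -EFAULT;	// fault while accessing uaddr
 *	if (cur != old)
 *		...		// value changed; swap was not performed
 */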
#ifndef __tilegx__
/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
#endif
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FUTEX_H */