arch/tile/include/asm/futex.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */

#ifndef _ASM_TILE_FUTEX_H
#define _ASM_TILE_FUTEX_H

#ifndef __ASSEMBLY__

#include <linux/futex.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

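/*
 * Each helper below returns a struct __get_user, which carries both the
 * old value read from userspace (.val) and an error code (.err): zero
 * on success, -EFAULT if the user address faulted.
 */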
extern struct __get_user futex_set(u32 __user *v, int i);
extern struct __get_user futex_add(u32 __user *v, int n);
extern struct __get_user futex_or(u32 __user *v, int n);
extern struct __get_user futex_andn(u32 __user *v, int n);
extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);

#ifndef __tilegx__
extern struct __get_user futex_xor(u32 __user *v, int n);
#else
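/*
 * No dedicated xor helper on this configuration, so emulate it: read
 * the current value, then retry futex_cmpxchg() with (old ^ n) until
 * the swap succeeds (the value read back equals the old value) or the
 * access faults.
 */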
static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
{
	struct __get_user asm_ret = __get_user_4(uaddr);
	if (!asm_ret.err) {
		int oldval, newval;
		do {
			oldval = asm_ret.val;
			newval = oldval ^ n;
			asm_ret = futex_cmpxchg(uaddr, oldval, newval);
		} while (asm_ret.err == 0 && oldval != asm_ret.val);
	}
	return asm_ret;
}
#endif

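/*
 * encoded_op packs the operation as built by FUTEX_OP() in <linux/futex.h>:
 * bit 31 is the FUTEX_OP_OPARG_SHIFT flag, bits 28-30 the operation,
 * bits 24-27 the comparison, bits 12-23 oparg and bits 0-11 cmparg, the
 * last two sign-extended by the shift pairs below.  For example,
 * FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0) encodes to 0x14001000.
 */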
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int ret;
	struct __get_user asm_ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

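	/*
	 * With page faults disabled, a faulting user access makes the
	 * helper report -EFAULT in asm_ret.err rather than sleeping in
	 * the fault handler, which would be illegal in this context.
	 */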
	pagefault_disable();
	switch (op) {
	case FUTEX_OP_SET:
		asm_ret = futex_set(uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		asm_ret = futex_add(uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		asm_ret = futex_or(uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		asm_ret = futex_andn(uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		asm_ret = futex_xor(uaddr, oparg);
		break;
	default:
		asm_ret.err = -ENOSYS;
	}
	pagefault_enable();

	ret = asm_ret.err;

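	/* On success, return the comparison of the old value with cmparg. */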
	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:
			ret = (asm_ret.val == cmparg);
			break;
		case FUTEX_OP_CMP_NE:
			ret = (asm_ret.val != cmparg);
			break;
		case FUTEX_OP_CMP_LT:
			ret = (asm_ret.val < cmparg);
			break;
		case FUTEX_OP_CMP_GE:
			ret = (asm_ret.val >= cmparg);
			break;
		case FUTEX_OP_CMP_LE:
			ret = (asm_ret.val <= cmparg);
			break;
		case FUTEX_OP_CMP_GT:
			ret = (asm_ret.val > cmparg);
			break;
		default:
			ret = -ENOSYS;
		}
	}

	return ret;
}

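/*
 * Atomically compare *uaddr with oldval and, if equal, store newval.
 * The value actually read is returned through *uval; the return value
 * is 0 on success or -EFAULT, so the swap happened iff *uval == oldval.
 */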
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
						u32 oldval, u32 newval)
{
	struct __get_user asm_ret;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	asm_ret = futex_cmpxchg(uaddr, oldval, newval);
	*uval = asm_ret.val;
	return asm_ret.err;
}

#ifndef __tilegx__
/* Return failure from the atomic wrappers. */
struct __get_user __atomic_bad_address(int __user *addr);
#endif

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_FUTEX_H */