/*
 * Provenance: recovered from a gitweb scrape of mascara-docs.git,
 * path i386/linux-2.3.21/include/asm-arm/proc-armo/uaccess.h
 * (blob 1a6b950edb3ff688afaa8cf719e352da541f57c9).
 */
/*
 *  linux/include/asm-arm/proc-armo/uaccess.h
 *
 *  Copyright (C) 1996 Russell King
 */

/*
 * The fs functions are implemented on the ARM2 and ARM3 architectures
 * manually.
 * Use *_user functions to access user memory with faulting behaving
 * as though the user is accessing the memory.
 * Use set_fs(get_ds()) and then the *_user functions to allow them to
 * access kernel memory.
 */

/*
 * These are the values used to represent the user `fs' and the kernel `ds'
 */
/*
 * Segment values.  KERNEL_DS sits above USER_DS so a single unsigned
 * comparison against current->addr_limit distinguishes the two.
 */
#define KERNEL_DS	0x03000000
#define USER_DS		0x02000000

/* Kernel data segment selector — pass to set_fs() to access kernel memory. */
#define get_ds()	(KERNEL_DS)

/* Current task's segment limit, as installed by set_fs(). */
#define get_fs()	(current->addr_limit)

/* Two segment values are equal iff their raw values match. */
#define segment_eq(a,b)	((a) == (b))
/*
 * Out-of-line accessor vectors: uaccess_user is used when the task is
 * limited to USER_DS, uaccess_kernel when it may touch kernel memory.
 * (Defined elsewhere in the proc-armo code.)
 */
extern uaccess_t uaccess_user, uaccess_kernel;

/*
 * Install a new segment limit for the current task and select the
 * matching accessor vector.  `fs' is expected to be USER_DS or
 * KERNEL_DS; any value other than USER_DS selects the kernel vector.
 */
extern __inline__ void set_fs (mm_segment_t fs)
{
	current->addr_limit = fs;
	current->tss.uaccess = fs == USER_DS ? &uaccess_user : &uaccess_kernel;
}
/*
 * Range check: expands to 0 when [addr, addr+size) lies wholly below
 * current->addr_limit, and to a non-zero value otherwise.
 *
 * `flag' starts out holding addr_limit (tied to %0 via the "0"
 * constraint).  The asm computes sum = addr_limit - size, setting the
 * flags; if that subtraction did not borrow (carry set), it compares
 * sum with addr and clears `flag' when addr <= addr_limit - size,
 * i.e. the whole range fits without wrapping.
 */
#define __range_ok(addr,size) ({					\
	unsigned long flag, sum;					\
	__asm__ __volatile__("subs %1, %0, %3; cmpcs %1, %2; movcs %0, #0" \
		: "=&r" (flag), "=&r" (sum)				\
		: "r" (addr), "Ir" (size), "0" (current->addr_limit)	\
		: "cc");						\
	flag; })
/*
 * Single-address check: true (non-zero) when `addr' is strictly below
 * current->addr_limit.
 *
 * `flag' starts out holding addr_limit; the asm clears it only when
 * addr < addr_limit (unsigned), so the macro yields (flag == 0).
 */
#define __addr_ok(addr) ({						\
	unsigned long flag;						\
	__asm__ __volatile__("cmp %2, %0; movlo %0, #0"			\
		: "=&r" (flag)						\
		: "0" (current->addr_limit), "r" (addr)			\
		: "cc");						\
	(flag == 0); })

/* Non-zero when the range is accessible; the `type' argument is ignored. */
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
/*
 * Store one byte at a (user) address through the current task's
 * uaccess vector.
 *
 * Helper calling convention (as used here): r0 = value, r1 = address,
 * r2 = error code in/out.  The call is made manually — "mov lr, pc"
 * sets the return address, then "mov pc, %3" jumps to the helper —
 * and the (possibly updated) error code is copied back into `err'.
 * r0-r2 and lr are declared clobbered for the call.
 */
#define __put_user_asm_byte(x,addr,err)					\
	__asm__ __volatile__(						\
	"	mov	r0, %1\n"					\
	"	mov	r1, %2\n"					\
	"	mov	r2, %0\n"					\
	"	mov	lr, pc\n"					\
	"	mov	pc, %3\n"					\
	"	mov	%0, r2\n"					\
	: "=r" (err)							\
	: "r" (x), "r" (addr), "r" (current->tss.uaccess->put_byte),	\
	  "0" (err)							\
	: "r0", "r1", "r2", "lr")
/*
 * Store a halfword at a (user) address through the current task's
 * uaccess vector.  Same convention as __put_user_asm_byte:
 * r0 = value, r1 = address, r2 = error code in/out, manual call via
 * "mov lr, pc; mov pc, %3".
 */
#define __put_user_asm_half(x,addr,err)					\
	__asm__ __volatile__(						\
	"	mov	r0, %1\n"					\
	"	mov	r1, %2\n"					\
	"	mov	r2, %0\n"					\
	"	mov	lr, pc\n"					\
	"	mov	pc, %3\n"					\
	"	mov	%0, r2\n"					\
	: "=r" (err)							\
	: "r" (x), "r" (addr), "r" (current->tss.uaccess->put_half),	\
	  "0" (err)							\
	: "r0", "r1", "r2", "lr")
/*
 * Store a word at a (user) address through the current task's
 * uaccess vector.  Same convention as __put_user_asm_byte:
 * r0 = value, r1 = address, r2 = error code in/out, manual call via
 * "mov lr, pc; mov pc, %3".
 */
#define __put_user_asm_word(x,addr,err)					\
	__asm__ __volatile__(						\
	"	mov	r0, %1\n"					\
	"	mov	r1, %2\n"					\
	"	mov	r2, %0\n"					\
	"	mov	lr, pc\n"					\
	"	mov	pc, %3\n"					\
	"	mov	%0, r2\n"					\
	: "=r" (err)							\
	: "r" (x), "r" (addr), "r" (current->tss.uaccess->put_word),	\
	  "0" (err)							\
	: "r0", "r1", "r2", "lr")
/*
 * Load one byte from a (user) address through the current task's
 * uaccess vector.
 *
 * Helper convention (as used here): r0 = address in / value out,
 * r1 = error code in/out.  The call is made manually via
 * "mov lr, pc; mov pc, %3"; afterwards the error code and the loaded
 * value are copied back into `err' and `x'.  r2 is also listed as
 * clobbered — presumably trashed by the helper; verify against the
 * proc-armo asm implementation.
 */
#define __get_user_asm_byte(x,addr,err)					\
	__asm__ __volatile__(						\
	"	mov	r0, %2\n"					\
	"	mov	r1, %0\n"					\
	"	mov	lr, pc\n"					\
	"	mov	pc, %3\n"					\
	"	mov	%0, r1\n"					\
	"	mov	%1, r0\n"					\
	: "=r" (err), "=r" (x)						\
	: "r" (addr), "r" (current->tss.uaccess->get_byte), "0" (err)	\
	: "r0", "r1", "r2", "lr")
/*
 * Load a halfword from a (user) address through the current task's
 * uaccess vector.  Same convention as __get_user_asm_byte:
 * r0 = address in / value out, r1 = error code in/out, manual call
 * via "mov lr, pc; mov pc, %3".
 */
#define __get_user_asm_half(x,addr,err)					\
	__asm__ __volatile__(						\
	"	mov	r0, %2\n"					\
	"	mov	r1, %0\n"					\
	"	mov	lr, pc\n"					\
	"	mov	pc, %3\n"					\
	"	mov	%0, r1\n"					\
	"	mov	%1, r0\n"					\
	: "=r" (err), "=r" (x)						\
	: "r" (addr), "r" (current->tss.uaccess->get_half), "0" (err)	\
	: "r0", "r1", "r2", "lr")
/*
 * Load a word from a (user) address through the current task's
 * uaccess vector.  Same convention as __get_user_asm_byte:
 * r0 = address in / value out, r1 = error code in/out, manual call
 * via "mov lr, pc; mov pc, %3".
 */
#define __get_user_asm_word(x,addr,err)					\
	__asm__ __volatile__(						\
	"	mov	r0, %2\n"					\
	"	mov	r1, %0\n"					\
	"	mov	lr, pc\n"					\
	"	mov	pc, %3\n"					\
	"	mov	%0, r1\n"					\
	"	mov	%1, r0\n"					\
	: "=r" (err), "=r" (x)						\
	: "r" (addr), "r" (current->tss.uaccess->get_word), "0" (err)	\
	: "r0", "r1", "r2", "lr")
/*
 * Bulk accessors, dispatched through the current task's uaccess
 * vector selected by set_fs().  Each macro assigns the helper's
 * result back into its size/result argument.
 *
 * NOTE(review): `n' / `sz' appear twice in the expansion (argument
 * and assignment target), so callers must not pass expressions with
 * side effects.
 */
#define __do_copy_from_user(to,from,n)					\
	(n) = current->tss.uaccess->copy_from_user((to),(from),(n))

#define __do_copy_to_user(to,from,n)					\
	(n) = current->tss.uaccess->copy_to_user((to),(from),(n))

#define __do_clear_user(addr,sz)					\
	(sz) = current->tss.uaccess->clear_user((addr),(sz))

#define __do_strncpy_from_user(dst,src,count,res)			\
	(res) = current->tss.uaccess->strncpy_from_user(dst,src,count)

#define __do_strlen_user(s,res)						\
	(res) = current->tss.uaccess->strlen_user(s)