/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/unistd.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/signal.h>
#include <asm/sim.h>
#include <asm/shmparam.h>
#include <asm/sysmips.h>
#include <asm/uaccess.h>

/*
 * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
 * convention.  It returns results in registers $v0 / $v1 which means there
 * is no need for it to verify the validity of a userspace pointer
 * argument.  Historically that used to be expensive in Linux.  These days
 * the performance advantage is negligible.
 */
asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
{
	int fd[2];
	int error, res;

	error = do_pipe_flags(fd, 0);
	if (error) {
		res = error;
		goto out;
	}
	regs.regs[3] = fd[1];	/* second descriptor goes back in $v1 */
	res = fd[0];
out:
	return res;
}
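
/*
 * Purely illustrative sketch (not part of this file) of what an o32
 * userspace pipe() wrapper built on this convention looks like; register
 * usage follows the standard MIPS syscall ABI and $a0 is assumed to hold
 * the caller's int[2] buffer:
 *
 *	li	$v0, __NR_pipe
 *	syscall
 *	bnez	$a3, error	# $a3 != 0: errno value is in $v0
 *	sw	$v0, 0($a0)	# read end
 *	sw	$v1, 4($a0)	# write end
 */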

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
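
/*
 * Worked example of COLOUR_ALIGN() (illustrative numbers only): assume
 * 4 KiB pages (PAGE_SHIFT == 12) and that the platform cache setup has
 * raised shm_align_mask to 0x3fff for a 16 KiB aliasing VIPT cache.  Then
 *
 *	COLOUR_ALIGN(0x20001000, 1)
 *		= ((0x20001000 + 0x3fff) & ~0x3fff) + ((1 << 12) & 0x3fff)
 *		= 0x20004000 + 0x1000
 *		= 0x20005000
 *
 * i.e. the candidate address is rounded up so it shares its cache colour
 * with file offset pgoff << PAGE_SHIFT, preventing aliasing between the
 * mapping and other views of the same page.
 */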

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;
	int do_color_align;
	unsigned long task_size;

#ifdef CONFIG_32BIT
	task_size = TASK_SIZE;
#else /* Must be CONFIG_64BIT */
	task_size = test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE;
#endif

	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within task_size. */
		if (task_size - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vmm = find_vma(current->mm, addr);
		if (task_size - len >= addr &&
		    (!vmm || addr + len <= vmm->vm_start))
			return addr;
	}
	addr = TASK_UNMAPPED_BASE;
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (task_size - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags, unsigned long,
	fd, off_t, offset)
{
	unsigned long result;

	result = -EINVAL;
	if (offset & ~PAGE_MASK)
		goto out;

	result = sys_mmap_pgoff(addr, len, prot, flags, fd,
				offset >> PAGE_SHIFT);
out:
	return result;
}

SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags, unsigned long, fd,
	unsigned long, pgoff)
{
	if (pgoff & (~PAGE_MASK >> 12))
		return -EINVAL;

	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      pgoff >> (PAGE_SHIFT - 12));
}
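
/*
 * Illustrative example of the unit conversion above, assuming 16 KiB
 * pages (PAGE_SHIFT == 14): mmap2(2) passes the file offset in fixed
 * 4096-byte units, so a pgoff argument of 8 means 32 KiB.  8 >> (14 - 12)
 * == 2, i.e. two 16 KiB pages == 32 KiB, which is the page-sized unit
 * sys_mmap_pgoff() expects.  The (~PAGE_MASK >> 12) test rejects offsets
 * that are not a multiple of the page size (with 4 KiB pages it is 0 and
 * never triggers).
 */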

save_static_function(sys_fork);
static int __used noinline
_sys_fork(nabi_no_regargs struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
}

save_static_function(sys_clone);
static int __used noinline
_sys_clone(nabi_no_regargs struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.regs[4];
	newsp = regs.regs[5];
	if (!newsp)
		newsp = regs.regs[29];
	parent_tidptr = (int __user *) regs.regs[6];
#ifdef CONFIG_32BIT
	/* We need to fetch the fifth argument off the stack. */
	child_tidptr = NULL;
	if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
		int __user *__user *usp = (int __user *__user *) regs.regs[29];
		if (regs.regs[2] == __NR_syscall) {
			if (get_user(child_tidptr, &usp[5]))
				return -EFAULT;
		} else if (get_user(child_tidptr, &usp[4]))
			return -EFAULT;
	}
#else
	child_tidptr = (int __user *) regs.regs[8];
#endif
	return do_fork(clone_flags, newsp, &regs, 0,
		       parent_tidptr, child_tidptr);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) (long)regs.regs[4]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, (char __user *__user *) (long)regs.regs[5],
			  (char __user *__user *) (long)regs.regs[6], &regs);
	putname(filename);

out:
	return error;
}

SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
	struct thread_info *ti = task_thread_info(current);

	ti->tp_value = addr;
	if (cpu_has_userlocal)
		write_c0_userlocal(addr);

	return 0;
}

static inline int mips_atomic_set(struct pt_regs *regs,
	unsigned long addr, unsigned long new)
{
	unsigned long old, tmp;
	unsigned int err;

	if (unlikely(addr & 3))
		return -EINVAL;

	if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
		return -EINVAL;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set	mips3				\n"
		"	li	%[err], 0			\n"
		"1:	ll	%[old], (%[addr])		\n"
		"	move	%[tmp], %[new]			\n"
		"2:	sc	%[tmp], (%[addr])		\n"
		"	beqzl	%[tmp], 1b			\n"
		"3:						\n"
		"	.section .fixup,\"ax\"			\n"
		"4:	li	%[err], %[efault]		\n"
		"	j	3b				\n"
		"	.previous				\n"
		"	.section __ex_table,\"a\"		\n"
		"	"STR(PTR)"	1b, 4b			\n"
		"	"STR(PTR)"	2b, 4b			\n"
		"	.previous				\n"
		"	.set	mips0				\n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__ (
		"	.set	mips3				\n"
		"	li	%[err], 0			\n"
		"1:	ll	%[old], (%[addr])		\n"
		"	move	%[tmp], %[new]			\n"
		"2:	sc	%[tmp], (%[addr])		\n"
		"	bnez	%[tmp], 4f			\n"
		"3:						\n"
		"	.subsection 2				\n"
		"4:	b	1b				\n"
		"	.previous				\n"
		"	.section .fixup,\"ax\"			\n"
		"5:	li	%[err], %[efault]		\n"
		"	j	3b				\n"
		"	.previous				\n"
		"	.section __ex_table,\"a\"		\n"
		"	"STR(PTR)"	1b, 5b			\n"
		"	"STR(PTR)"	2b, 5b			\n"
		"	.previous				\n"
		"	.set	mips0				\n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
	} else {
		err = __get_user(old, (unsigned int *) addr);
		err |= __put_user(new, (unsigned int *) addr);
	}

	if (unlikely(err))
		return err;

	regs->regs[2] = old;
	regs->regs[7] = 0;	/* No error */

	/*
	 * Don't let your children do this ...
	 */
	__asm__ __volatile__(
	"	move	$29, %0					\n"
	"	j	syscall_exit				\n"
	: /* no outputs */
	: "r" (regs));

	/* unreached.  Honestly.  */
	while (1);
}
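
/*
 * Illustrative userspace view of the helper above, assuming the usual
 * sysmips(cmd, arg1, arg2, arg3) wrapper exported by libc:
 *
 *	old = sysmips(MIPS_ATOMIC_SET, (long) &word, new, 0);
 *
 * atomically replaces word with new and returns its previous value,
 * which is how user code on CPUs lacking LL/SC implements locking.
 */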

save_static_function(sys_sysmips);
static int __used noinline
_sys_sysmips(nabi_no_regargs struct pt_regs regs)
{
	long cmd, arg1, arg2, arg3;

	cmd = regs.regs[4];
	arg1 = regs.regs[5];
	arg2 = regs.regs[6];
	arg3 = regs.regs[7];

	switch (cmd) {
	case MIPS_ATOMIC_SET:
		return mips_atomic_set(&regs, arg1, arg2);

	case MIPS_FIXADE:
		if (arg1 & ~3)
			return -EINVAL;

		if (arg1 & 1)
			set_thread_flag(TIF_FIXADE);
		else
			clear_thread_flag(TIF_FIXADE);
		if (arg1 & 2)
			set_thread_flag(TIF_LOGADE);
		else
			clear_thread_flag(TIF_LOGADE);

		return 0;
	}

	return -EINVAL;
}

/*
 * Not implemented yet ...
 */
SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
	return -ENOSYS;
}

/*
 * If we ever come here the user sp is bad.  Zap the process right away.
 * Due to the bad stack signaling wouldn't work.
 */
asmlinkage void bad_stack(void)
{
	do_exit(SIGSEGV);
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register unsigned long __a0 asm("$4") = (unsigned long) filename;
	register unsigned long __a1 asm("$5") = (unsigned long) argv;
	register unsigned long __a2 asm("$6") = (unsigned long) envp;
	register unsigned long __a3 asm("$7");
	unsigned long __v0;

	__asm__ volatile ("					\n"
	"	.set	noreorder				\n"
	"	li	$2, %5		# __NR_execve		\n"
	"	syscall						\n"
	"	move	%0, $2					\n"
	"	.set	reorder					\n"
	: "=&r" (__v0), "=r" (__a3)
	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",