/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/mman.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <linux/unistd.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/ipc.h>
#include <linux/uaccess.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
#include <asm/asm-offsets.h>
#include <asm/signal.h>
#include <asm/sim.h>
#include <asm/shmparam.h>
#include <asm/sysmips.h>
#include <asm/uaccess.h>

/*
 * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
 * convention.  It returns results in registers $v0 / $v1, which means there
 * is no need for it to verify the validity of a userspace pointer
 * argument.  Historically that used to be expensive in Linux.  These days
 * the performance advantage is negligible.
 */
asmlinkage int sysm_pipe(nabi_no_regargs volatile struct pt_regs regs)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		return error;
	regs.regs[3] = fd[1];	/* second descriptor is handed back in $v1 */
	return fd[0];		/* first descriptor is the normal $v0 result */
}

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
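
/*
 * Illustrative example (not from the original source): on a CPU whose
 * virtually indexed cache aliases on 16 KB, setup code would raise
 * shm_align_mask to 0x3fff.  COLOUR_ALIGN(0x1234, 3) then evaluates, with
 * 4 KB pages (PAGE_SHIFT = 12), to
 *
 *	((0x1234 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *	= 0x4000 + 0x3000 = 0x7000
 *
 * i.e. the address is rounded up to the next alias boundary and then offset
 * so that it shares a cache colour with file offset pgoff.
 */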

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;
	int do_color_align;
	unsigned long task_size;

	task_size = STACK_TOP;

	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within task_size. */
		if (task_size - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vmm = find_vma(current->mm, addr);
		if (task_size - len >= addr &&
		    (!vmm || addr + len <= vmm->vm_start))
			return addr;
	}
	addr = TASK_UNMAPPED_BASE;
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (task_size - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

/* common code for old and new mmaps */
static inline unsigned long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
	unsigned long flags, unsigned long fd, unsigned long pgoff)
{
	unsigned long error = -EBADF;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags, unsigned long,
	fd, off_t, offset)
{
	unsigned long result;

	result = -EINVAL;
	if (offset & ~PAGE_MASK)
		goto out;

	result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);

out:
	return result;
}

SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
	unsigned long, prot, unsigned long, flags, unsigned long, fd,
	unsigned long, pgoff)
{
	if (pgoff & (~PAGE_MASK >> 12))
		return -EINVAL;

	return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
}
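
/*
 * Clarifying note (not in the original file): userspace mmap2() passes its
 * offset in fixed 4096-byte units regardless of the kernel page size.  The
 * check above rejects offsets that are not page aligned, and the shift
 * converts to PAGE_SIZE units: with 16 KB pages (PAGE_SHIFT = 14) a user
 * pgoff of 8, i.e. a 32 KB file offset, becomes a kernel pgoff of 2.
 */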

save_static_function(sys_fork);
static int __used noinline
_sys_fork(nabi_no_regargs struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.regs[29], &regs, 0, NULL, NULL);
}

save_static_function(sys_clone);
static int __used noinline
_sys_clone(nabi_no_regargs struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.regs[4];
	newsp = regs.regs[5];
	if (!newsp)
		newsp = regs.regs[29];
	parent_tidptr = (int __user *) regs.regs[6];
#ifdef CONFIG_32BIT
	/* We need to fetch the fifth argument off the stack. */
	child_tidptr = NULL;
	if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
		int __user *__user *usp = (int __user *__user *) regs.regs[29];
		if (regs.regs[2] == __NR_syscall) {
			if (get_user(child_tidptr, &usp[5]))
				return -EFAULT;
		} else if (get_user(child_tidptr, &usp[4]))
			return -EFAULT;
	}
#else
	child_tidptr = (int __user *) regs.regs[8];
#endif
	return do_fork(clone_flags, newsp, &regs, 0,
		       parent_tidptr, child_tidptr);
}
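
/*
 * Clarifying note (not part of the original file): in the o32 ABI only four
 * syscall arguments are passed in registers, so the fifth argument
 * (child_tidptr) lives on the user stack at usp[4].  When the call entered
 * through the indirect syscall(2) stub, regs.regs[2] still holds
 * __NR_syscall and every argument is shifted by one slot, which is why
 * usp[5] is read in that case.
 */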

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) (long)regs.regs[4]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, (char __user *__user *) (long)regs.regs[5],
			  (char __user *__user *) (long)regs.regs[6], &regs);
	putname(filename);

out:
	return error;
}

/*
 * Compacrapability ...
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	if (name && !copy_to_user(name, utsname(), sizeof (*name)))
		return 0;
	return -EFAULT;
}

/*
 * Compacrapability ...
 */
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error -= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error -= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error -= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error -= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error -= __put_user(0, name->release + __OLD_UTS_LEN);
	error -= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error -= __put_user(0, name->version + __OLD_UTS_LEN);
	error -= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error = __put_user(0, name->machine + __OLD_UTS_LEN);
	error = error ? -EFAULT : 0;

	return error;
}
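
/*
 * Clarifying note (not in the original file): __copy_to_user() returns the
 * number of bytes left uncopied and __put_user() returns 0 or -EFAULT, so
 * the accumulation above is meant to leave "error" nonzero after any
 * faulting access, which the final line collapses to -EFAULT.
 */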

SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
{
	struct thread_info *ti = task_thread_info(current);

	ti->tp_value = addr;
	if (cpu_has_userlocal)
		write_c0_userlocal(addr);

	return 0;
}
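
/*
 * Background note (not part of the original file): the address stored here
 * is the thread pointer used for TLS.  On CPUs with the UserLocal register
 * user space reads it directly via "rdhwr $3, $29"; on older CPUs that
 * instruction traps and the kernel emulates it from thread_info, which is
 * why the coprocessor register is only written when cpu_has_userlocal.
 */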

static inline int mips_atomic_set(struct pt_regs *regs,
	unsigned long addr, unsigned long new)
{
	unsigned long old, tmp;
	unsigned int err;

	if (unlikely(addr & 3))
		return -EINVAL;

	if (unlikely(!access_ok(VERIFY_WRITE, addr, 4)))
		return -EINVAL;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__ (
		"	.set	mips3				\n"
		"	li	%[err], 0			\n"
		"1:	ll	%[old], (%[addr])		\n"
		"	move	%[tmp], %[new]			\n"
		"2:	sc	%[tmp], (%[addr])		\n"
		"	beqzl	%[tmp], 1b			\n"
		"3:						\n"
		"	.set	mips0				\n"
		"	.section .fixup,\"ax\"			\n"
		"4:	li	%[err], %[efault]		\n"
		"	j	3b				\n"
		"	.previous				\n"
		"	.section __ex_table,\"a\"		\n"
		"	"STR(PTR)"	1b, 4b			\n"
		"	"STR(PTR)"	2b, 4b			\n"
		"	.previous				\n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
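
		/*
		 * Background note (not in the original file): R10000_LLSC_WAR
		 * selects this variant for early R10000 parts whose ll/sc
		 * erratum requires the retry branch after sc to be a
		 * branch-likely (beqzl) rather than a plain beqz.
		 */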
	} else if (cpu_has_llsc) {
		__asm__ __volatile__ (
		"	.set	mips3				\n"
		"	li	%[err], 0			\n"
		"1:	ll	%[old], (%[addr])		\n"
		"	move	%[tmp], %[new]			\n"
		"2:	sc	%[tmp], (%[addr])		\n"
		"	bnez	%[tmp], 4f			\n"
		"3:						\n"
		"	.set	mips0				\n"
		"	.subsection 2				\n"
		"4:	b	1b				\n"
		"	.previous				\n"
		"	.section .fixup,\"ax\"			\n"
		"5:	li	%[err], %[efault]		\n"
		"	j	3b				\n"
		"	.previous				\n"
		"	.section __ex_table,\"a\"		\n"
		"	"STR(PTR)"	1b, 5b			\n"
		"	"STR(PTR)"	2b, 5b			\n"
		"	.previous				\n"
		: [old] "=&r" (old),
		  [err] "=&r" (err),
		  [tmp] "=&r" (tmp)
		: [addr] "r" (addr),
		  [new] "r" (new),
		  [efault] "i" (-EFAULT)
		: "memory");
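
		/*
		 * Background note (not in the original file): the __ex_table
		 * entries pair the ll and sc instructions with the .fixup
		 * label, so if either faults on the user address the
		 * exception handler resumes at the fixup code, which merely
		 * reports -EFAULT through %[err] instead of killing the task.
		 */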
	} else {
		err = __get_user(old, (unsigned int *) addr);
		err |= __put_user(new, (unsigned int *) addr);
	}

	if (unlikely(err))
		return err;

	regs->regs[2] = old;
	regs->regs[7] = 0;	/* No error */
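
	/*
	 * Clarifying note (not in the original file): in the MIPS syscall
	 * convention the return value travels in $v0 (saved regs[2]) and
	 * $a3 (saved regs[7]) is the error flag, so the two stores above
	 * hand the old value back to user space as a successful syscall.
	 */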

	/* Don't let your children do this ... */
	__asm__ __volatile__(
	"	move	$29, %0					\n"
	"	j	syscall_exit				\n"
	: /* no outputs */
	: "r" (regs));

	/* unreached.  Honestly.  */
}
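
/*
 * Clarifying note (not part of the original file): the inline assembly at
 * the end of mips_atomic_set() points $sp at the saved register frame and
 * jumps straight to syscall_exit, so control never returns through
 * sys_sysmips; the exit path restores the values planted in regs[2] and
 * regs[7] above.
 */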

save_static_function(sys_sysmips);
static int __used noinline
_sys_sysmips(nabi_no_regargs struct pt_regs regs)
{
	long cmd, arg1, arg2, arg3;

	cmd = regs.regs[4];
	arg1 = regs.regs[5];
	arg2 = regs.regs[6];
	arg3 = regs.regs[7];

	switch (cmd) {
	case MIPS_ATOMIC_SET:
		return mips_atomic_set(&regs, arg1, arg2);

	case MIPS_FIXADE:
		if (arg1 & ~3)
			return -EINVAL;

		if (arg1 & 1)
			set_thread_flag(TIF_FIXADE);
		else
			clear_thread_flag(TIF_FIXADE);
		if (arg1 & 2)
			set_thread_flag(TIF_LOGADE);
		else
			clear_thread_flag(TIF_LOGADE);

		return 0;
	}

	return -EINVAL;
}
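
/*
 * Clarifying note (not in the original file): MIPS_FIXADE controls how the
 * kernel handles address-error exceptions raised by this task's unaligned
 * accesses: bit 0 (TIF_FIXADE) requests that the access be fixed up in
 * software, bit 1 (TIF_LOGADE) requests that it be logged.
 */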

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, int, second,
	unsigned long, third, void __user *, ptr, long, fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;
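
	/*
	 * Illustrative note (not in the original file): libc funnels every
	 * SysV IPC primitive through this single syscall.  A call such as
	 * semget(key, 4, IPC_CREAT) typically arrives as
	 * ipc(SEMGET, key, 4, IPC_CREAT, NULL, 0); the low 16 bits of
	 * "call" pick the case below and the upper 16 bits carry the
	 * interface version used by the MSGRCV and SHMAT paths.
	 */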

	switch (call) {
	case SEMOP:
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
				      second, NULL);
	case SEMTIMEDOP:
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
				      second,
				      (const struct timespec __user *)fifth);
	case SEMGET:
		return sys_semget(first, second, third);
	case SEMCTL: {
		union semun fourth;

		if (!ptr)
			return -EINVAL;
		if (get_user(fourth.__pad, (void __user *__user *) ptr))
			return -EFAULT;
		return sys_semctl(first, second, third, fourth);
	}

	case MSGSND:
		return sys_msgsnd(first, (struct msgbuf __user *) ptr,
				  second, third);
	case MSGRCV:
		switch (version) {
		case 0: {
			struct ipc_kludge tmp;

			if (!ptr)
				return -EINVAL;
			if (copy_from_user(&tmp,
					   (struct ipc_kludge __user *) ptr,
					   sizeof(tmp)))
				return -EFAULT;
			return sys_msgrcv(first, tmp.msgp, second,
					  tmp.msgtyp, third);
		}
		default:
			return sys_msgrcv(first,
					  (struct msgbuf __user *) ptr,
					  second, fifth, third);
		}
	case MSGGET:
		return sys_msgget((key_t) first, second);
	case MSGCTL:
		return sys_msgctl(first, second,
				  (struct msqid_ds __user *) ptr);

	case SHMAT:
		switch (version) {
		default: {
			unsigned long raddr;

			ret = do_shmat(first, (char __user *) ptr, second,
				       &raddr);
			if (ret)
				return ret;
			return put_user(raddr, (unsigned long __user *) third);
		}
		case 1:	/* iBCS2 emulator entry point */
			if (!segment_eq(get_fs(), get_ds()))
				return -EINVAL;
			return do_shmat(first, (char __user *) ptr, second,
					(unsigned long *) third);
		}
	case SHMDT:
		return sys_shmdt((char __user *)ptr);
	case SHMGET:
		return sys_shmget(first, second, third);
	case SHMCTL:
		return sys_shmctl(first, second,
				  (struct shmid_ds __user *) ptr);
	default:
		return -ENOSYS;
	}
}

/*
 * Not implemented yet ...
 */
SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
{
	return -ENOSYS;
}

/*
 * If we ever come here the user sp is bad.  Zap the process right away.
 * Due to the bad stack, signaling wouldn't work.
 */
asmlinkage void bad_stack(void)
{
	do_exit(SIGSEGV);
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register unsigned long __a0 asm("$4") = (unsigned long) filename;
	register unsigned long __a1 asm("$5") = (unsigned long) argv;
	register unsigned long __a2 asm("$6") = (unsigned long) envp;
	register unsigned long __a3 asm("$7");
	unsigned long __v0;

	__asm__ volatile ("					\n"
	"	.set	noreorder				\n"
	"	li	$2, %5		# __NR_execve		\n"
	"	syscall						\n"
	"	move	%0, $2					\n"
	"	.set	reorder					\n"
	: "=&r" (__v0), "=r" (__a3)
	: "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
	: "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
	  "memory");

	if (__a3 == 0)
		return __v0;

	return -__v0;
}