/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/kernel/sys_sh64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/SH5
 * platform.
 *
 * Mostly taken from the i386 version.
 */

#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>

#define REG_3	3

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
#ifdef NEW_PIPE_IMPLEMENTATION
asmlinkage int sys_pipe(unsigned long *fildes,
			unsigned long dummy_r3,
			unsigned long dummy_r4,
			unsigned long dummy_r5,
			unsigned long dummy_r6,
			unsigned long dummy_r7,
			struct pt_regs *regs)	/* r8 = pt_regs, forced by entry.S */
{
	int fd[2];
	int ret;

	ret = do_pipe(fd);
	if (ret == 0)
		/*
		 ***********************************************************************
		 * To avoid the copy_to_user we prefer to break the ABI convention,   *
		 * packing the valid pair of file IDs into a single register (r3),    *
		 * while r2 is the return code as defined by the sh5 ABI.             *
		 * BE CAREFUL: the pipe stub in glibc must be aware of this solution. *
		 ***********************************************************************
		 *
		 * The endian-specific variant below is kept for reference only:
		 *
		 * #ifdef __LITTLE_ENDIAN__
		 *	regs->regs[REG_3] = (((unsigned long long) fd[1]) << 32) | ((unsigned long long) fd[0]);
		 * #else
		 *	regs->regs[REG_3] = (((unsigned long long) fd[0]) << 32) | ((unsigned long long) fd[1]);
		 * #endif
		 */

		/* although not very clever, this is endianness-independent */
		regs->regs[REG_3] = (unsigned long long) *((unsigned long long *) fd);

	return ret;
}

#else
asmlinkage int sys_pipe(unsigned long *fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}
#endif
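
/*
 * Illustrative sketch only (a hypothetical userspace stub, not part of this
 * file): a C library pipe() built against the NEW_PIPE_IMPLEMENTATION ABI
 * must recover both descriptors from the packed r3 value after the trap.
 * Assuming the little-endian layout noted above, with ret taken from r2 and
 * pair taken from r3:
 *
 *	if (ret == 0) {
 *		fildes[0] = (int) (pair & 0xffffffffULL);
 *		fildes[1] = (int) (pair >> 32);
 *	}
 *	return ret;
 */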

/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
#define COLOUR_ALIGN(addr)	(((addr)+SHMLBA-1)&~(SHMLBA-1))

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vma;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (flags & MAP_PRIVATE)
		addr = PAGE_ALIGN(addr);
	else
		addr = COLOUR_ALIGN(addr);

	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vma || addr + len <= vma->vm_start)
			return addr;
		addr = vma->vm_end;
		if (!(flags & MAP_PRIVATE))
			addr = COLOUR_ALIGN(addr);
	}
}
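
/*
 * Worked example of the colour alignment above (the SHMLBA value here is
 * hypothetical): with SHMLBA = 0x8000, COLOUR_ALIGN(0x40001234) evaluates to
 * (0x40001234 + 0x7fff) & ~0x7fff = 0x40008000, the next SHMLBA boundary.
 * Because every shared mapping starts on such a boundary, two mappings of
 * the same page always fall on the same cache colour and cannot alias.
 */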

/* common code for old and new mmaps */
static inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

asmlinkage int old_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	int fd, unsigned long off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;
	return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
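
/*
 * The two entry points differ only in the unit of the final argument (the
 * numbers below are illustrative and assume 4KB pages): to map from file
 * offset 8KB, old_mmap() is passed off = 0x2000, which must be page
 * aligned, while sys_mmap2() is passed pgoff = 0x2000 >> PAGE_SHIFT = 2.
 * Counting in pages lets a 32-bit argument reach offsets up to
 * 2^(32 + PAGE_SHIFT) bytes into a file.
 */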

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second, NULL);
		case SEMTIMEDOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second,
					      (const struct timespec __user *)fifth);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void * __user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -EINVAL;
		}

	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
					   second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;

				if (copy_from_user(&tmp,
						   (struct ipc_kludge __user *) ptr,
						   sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first,
						   (struct msgbuf __user *) ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second,
					   (struct msqid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, (char __user *) ptr,
						second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			case 1:	/* iBCS2 emulator entry point */
				if (!segment_eq(get_fs(), get_ds()))
					return -EINVAL;
				return do_shmat (first, (char __user *) ptr,
						 second, (ulong *) third);
			}
		case SHMDT:
			return sys_shmdt ((char __user *)ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second,
					   (struct shmid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	return -EINVAL;
}
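
/*
 * For illustration (userspace view, not part of this file): a version-0 C
 * library funnels msgrcv(msqid, msgp, msgsz, msgtyp, msgflg) through this
 * multiplexer roughly as
 *
 *	struct ipc_kludge tmp = { .msgp = msgp, .msgtyp = msgtyp };
 *	ipc(MSGRCV, msqid, msgsz, msgflg, &tmp, 0);
 *
 * while newer stubs pass msgp in ptr and msgtyp in fifth directly, encoding
 * the stub version in the top 16 bits of the call number.
 */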

asmlinkage int sys_uname(struct old_utsname *name)
{
	int err;

	if (!name)
		return -EFAULT;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof(*name));
	up_read(&uts_sem);
	return err ? -EFAULT : 0;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
	register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
	register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv;
	register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp;

	__asm__ __volatile__ ("trapa	%1 !\t\t\t execve(%2,%3,%4)"
			      : "=r" (__sc0)
			      : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4));
	__asm__ __volatile__ ("!dummy	%0 %1 %2 %3"
			      : : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4)
			      : "memory");
	return __sc0;
}