/*
 * linux/arch/sh/kernel/sys_sh.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/SuperH
 * platform.
 *
 * Taken from i386 version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/ipc.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
	unsigned long r6, unsigned long r7,
	struct pt_regs __regs)
{
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		/* The read end is the return value; the write end goes back in r1. */
		regs->regs[1] = fd[1];
		return fd[0];
	}
	return error;
}
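/*
 * Minimum alignment between user mappings of the same page needed to keep
 * them on the same cache colour.  The PAGE_SIZE - 1 default adds nothing
 * beyond normal page alignment ("sane caches"); it is exported so that
 * cache setup code and modules can consult (or widen) it when aliasing is
 * possible.
 */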
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */

EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with same color.
 */
#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
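/*
 * arch_get_unmapped_area() is a first-fit search over the VMA list with an
 * SH-specific twist: file-backed and MAP_SHARED mappings are colour-aligned
 * via COLOUR_ALIGN() so that every user mapping of a page shares the same
 * cache index bits.  free_area_cache/cached_hole_size avoid rescanning from
 * TASK_UNMAPPED_BASE on every call.
 */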
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	int do_colour_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		mm->cached_hole_size = 0;
		start_addr = addr = TASK_UNMAPPED_BASE;
	}

full_search:
	if (do_colour_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(mm->free_area_cache);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
#endif /* CONFIG_MMU */
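/*
 * Common backend for both mmap entry points below.  The offset is already
 * in page units here; MAP_EXECUTABLE and MAP_DENYWRITE are stripped, the
 * file descriptor is resolved for non-anonymous mappings, and the actual
 * work is done by do_mmap_pgoff() under mmap_sem.
 */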
static inline long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
	 unsigned long flags, int fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}
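/*
 * Legacy mmap(): takes a byte offset, which must be page aligned, and
 * converts it to the page offset that do_mmap2() expects.
 */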
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	int fd, unsigned long off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;
	return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
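/* mmap2() already passes its offset in page units, so it maps straight through. */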
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMTIMEDOP)
		switch (call) {
		case SEMOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second, NULL);
		case SEMTIMEDOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second,
					      (const struct timespec __user *)fifth);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void * __user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -EINVAL;
		}

	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
					   second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;

				if (copy_from_user(&tmp,
						   (struct ipc_kludge __user *) ptr,
						   sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first,
						   (struct msgbuf __user *) ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second,
					   (struct msqid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, (char __user *) ptr,
						second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			case 1:	/* iBCS2 emulator entry point */
				if (!segment_eq(get_fs(), get_ds()))
					return -EINVAL;
				return do_shmat (first, (char __user *) ptr,
						 second, (ulong *) third);
			}
		case SHMDT:
			return sys_shmdt ((char __user *)ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second,
					   (struct shmid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	return -EINVAL;
}
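/*
 * Legacy uname(): copies the whole old_utsname structure out to userspace
 * under uts_sem.
 */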
asmlinkage int sys_uname(struct old_utsname * name)
{
	int err;
	if (!name)
		return -EFAULT;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof (*name));
	up_read(&uts_sem);
	return err ? -EFAULT : 0;
}
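/*
 * The pread/pwrite wrappers exist because of the SH argument-passing
 * convention: the unused 'dummy' argument pads the list so that the 64-bit
 * 'pos' value lands on an aligned register pair.
 */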
asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
				     size_t count, long dummy, loff_t pos)
{
	return sys_pread64(fd, buf, count, pos);
}
asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
				      size_t count, long dummy, loff_t pos)
{
	return sys_pwrite64(fd, buf, count, pos);
}
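/*
 * fadvise64_64() takes a 64-bit offset and length, which userspace passes
 * as 32-bit halves; the order of the halves depends on endianness, so the
 * wrapper reassembles them accordingly.
 */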
asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
					u32 len0, u32 len1, int advice)
{
#ifdef __LITTLE_ENDIAN__
	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
				(u64)len1 << 32 | len0, advice);
#else
	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
				(u64)len0 << 32 | len1, advice);
#endif
}
#if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A)
#define SYSCALL_ARG3	"trapa #0x23"
#else
#define SYSCALL_ARG3	"trapa #0x13"
#endif
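/*
 * SYSCALL_ARG3 is the trapa encoding used below to enter the kernel for the
 * three-argument execve call; SH-2/SH-2A parts use a different trap vector
 * range than SH-3/SH-4, hence the two definitions above.
 */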
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __sc0 __asm__ ("r3") = __NR_execve;
	register long __sc4 __asm__ ("r4") = (long) filename;
	register long __sc5 __asm__ ("r5") = (long) argv;
	register long __sc6 __asm__ ("r6") = (long) envp;
	__asm__ __volatile__ (SYSCALL_ARG3 : "=z" (__sc0)
			: "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6)
			: "memory");
	return __sc0;
}