/*
 * linux/arch/sh/kernel/sys_sh.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/SuperH
 * platform.
 *
 * Taken from i386 version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/uaccess.h>
#include <asm/ipc.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
	unsigned long r6, unsigned long r7,
	struct pt_regs regs)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		regs.regs[1] = fd[1];
		return fd[0];
	}
	return error;
}
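
/*
 * Note (added for clarity; not in the original source): SuperH returns
 * both descriptors from sys_pipe() at once - fd[0] goes back as the
 * normal return value, while fd[1] is written into the saved register
 * frame so that userland finds it in r1 after the syscall returns.
 */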

#if defined(HAVE_ARCH_UNMAPPED_AREA)
/*
 * To avoid cache aliasing, we map the shared page with the same colour.
 */
#define COLOUR_ALIGN(addr)	(((addr)+SHMLBA-1)&~(SHMLBA-1))
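/*
 * Worked example (added for illustration; assumes SHMLBA == 0x4000,
 * the SH-4 alias granularity): COLOUR_ALIGN(0x5234) ==
 * (0x5234 + 0x3fff) & ~0x3fff == 0x8000, i.e. the address is rounded
 * up to the next SHMLBA boundary so that mappings of the same page
 * always land on the same cache colour.
 */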

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (flags & MAP_PRIVATE)
			addr = PAGE_ALIGN(addr);
		else
			addr = COLOUR_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (flags & MAP_PRIVATE)
		addr = PAGE_ALIGN(mm->free_area_cache);
	else
		addr = COLOUR_ALIGN(mm->free_area_cache);
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = vma->vm_end;
		if (!(flags & MAP_PRIVATE))
			addr = COLOUR_ALIGN(addr);
	}
}
#endif
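
/*
 * Note (added for clarity; not in the original source): the loop above
 * is a first-fit walk of the VMA list starting at mm->free_area_cache;
 * if the first pass runs out of address space it restarts once from
 * TASK_UNMAPPED_BASE so that holes below the cached hint are not missed.
 */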

static inline long
do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
	 unsigned long flags, int fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file *file = NULL;

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

asmlinkage int old_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	int fd, unsigned long off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;
	return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
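
/*
 * Worked example (added for illustration; assumes PAGE_SHIFT == 12,
 * i.e. 4KB pages): a byte offset of 0x12000 passes the alignment check
 * and becomes pgoff = 0x12000 >> 12 = 0x12, while an unaligned offset
 * such as 0x12345 has bits set under ~PAGE_MASK and fails with -EINVAL.
 */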

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
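
/*
 * Note (added for clarity; not in the original source): sys_mmap2()
 * takes its offset in page-sized units rather than bytes, so 32-bit
 * userland can map file offsets beyond 4GB.
 */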

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;
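
	/*
	 * Worked example (added for illustration): glibc's ipc() stub may
	 * pass call == (1 << 16) | SHMAT for the old iBCS2-style shmat,
	 * which decodes to version 1, operation SHMAT; a plain SHMAT value
	 * decodes to version 0.
	 */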

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second, NULL);
		case SEMTIMEDOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second,
					      (const struct timespec __user *)fifth);
		case SEMGET:
			return sys_semget(first, second, third);
		case SEMCTL: {
			union semun fourth;

			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void * __user *)ptr))
				return -EFAULT;
			return sys_semctl(first, second, third, fourth);
		}
		default:
			return -EINVAL;
		}

	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd(first, (struct msgbuf __user *)ptr,
					  second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;

				if (!ptr)
					return -EINVAL;
				if (copy_from_user(&tmp,
						   (struct ipc_kludge __user *)ptr,
						   sizeof(tmp)))
					return -EFAULT;
				return sys_msgrcv(first, tmp.msgp, second,
						  tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv(first,
						  (struct msgbuf __user *)ptr,
						  second, fifth, third);
			}
		case MSGGET:
			return sys_msgget((key_t)first, second);
		case MSGCTL:
			return sys_msgctl(first, second,
					  (struct msqid_ds __user *)ptr);
		default:
			return -EINVAL;
		}

	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;

				ret = do_shmat(first, (char __user *)ptr,
					       second, &raddr);
				if (ret)
					return ret;
				return put_user(raddr, (ulong __user *)third);
			}
			case 1:	/* iBCS2 emulator entry point */
				if (!segment_eq(get_fs(), get_ds()))
					return -EINVAL;
				return do_shmat(first, (char __user *)ptr,
						second, (ulong *)third);
			}
		case SHMDT:
			return sys_shmdt((char __user *)ptr);
		case SHMGET:
			return sys_shmget(first, second, third);
		case SHMCTL:
			return sys_shmctl(first, second,
					  (struct shmid_ds __user *)ptr);
		default:
			return -EINVAL;
		}

	return -EINVAL;
}

asmlinkage int sys_uname(struct old_utsname *name)
{
	int err;

	if (!name)
		return -EFAULT;
	down_read(&uts_sem);
	err = copy_to_user(name, &system_utsname, sizeof(*name));
	up_read(&uts_sem);
	return err ? -EFAULT : 0;
}
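
/*
 * Note (added for clarity; not in the original source): copy_to_user()
 * returns the number of bytes it could not copy, so any non-zero result
 * above collapses to -EFAULT.
 */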

asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char *buf,
				     size_t count, long dummy, loff_t pos)
{
	return sys_pread64(fd, buf, count, pos);
}

asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char *buf,
				      size_t count, long dummy, loff_t pos)
{
	return sys_pwrite64(fd, buf, count, pos);
}
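
/*
 * Note (added for clarity; not in the original source): the 'dummy'
 * argument in both wrappers pads the argument list so that the 64-bit
 * 'pos' value lands in an aligned register pair, as the SH calling
 * convention requires for long long arguments.
 */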

asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
					u32 len0, u32 len1, int advice)
{
#ifdef __LITTLE_ENDIAN__
	return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
				(u64)len1 << 32 | len0, advice);
#else
	return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
				(u64)len0 << 32 | len1, advice);
#endif
}