3 * PARISC specific syscalls
5 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
6 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
7 * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <asm/uaccess.h>
26 #include <linux/file.h>
28 #include <linux/linkage.h>
30 #include <linux/mman.h>
31 #include <linux/shm.h>
32 #include <linux/syscalls.h>
33 #include <linux/utsname.h>
34 #include <linux/personality.h>
36 int sys_pipe(int __user
*fildes
)
43 if (copy_to_user(fildes
, fd
, 2*sizeof(int)))
49 static unsigned long get_unshared_area(unsigned long addr
, unsigned long len
)
51 struct vm_area_struct
*vma
;
53 addr
= PAGE_ALIGN(addr
);
55 for (vma
= find_vma(current
->mm
, addr
); ; vma
= vma
->vm_next
) {
56 /* At this point: (!vma || addr < vma->vm_end). */
57 if (TASK_SIZE
- len
< addr
)
59 if (!vma
|| addr
+ len
<= vma
->vm_start
)
/* Round @addr up to the next SHMLBA boundary (data-cache alignment). */
#define DCACHE_ALIGN(addr) (((addr) + (SHMLBA - 1)) &~ (SHMLBA - 1))
68 * We need to know the offset to use. Old scheme was to look for
69 * existing mapping and use the same offset. New scheme is to use the
70 * address of the kernel data structure as the seed for the offset.
71 * We'll see how that works...
73 * The mapping is cacheline aligned, so there's no information in the bottom
74 * few bits of the address. We're looking for 10 bits (4MB / 4k), so let's
75 * drop the bottom 8 bits and use bits 8-17.
77 static int get_offset(struct address_space
*mapping
)
79 int offset
= (unsigned long) mapping
<< (PAGE_SHIFT
- 8);
80 return offset
& 0x3FF000;
83 static unsigned long get_shared_area(struct address_space
*mapping
,
84 unsigned long addr
, unsigned long len
, unsigned long pgoff
)
86 struct vm_area_struct
*vma
;
87 int offset
= mapping
? get_offset(mapping
) : 0;
89 addr
= DCACHE_ALIGN(addr
- offset
) + offset
;
91 for (vma
= find_vma(current
->mm
, addr
); ; vma
= vma
->vm_next
) {
92 /* At this point: (!vma || addr < vma->vm_end). */
93 if (TASK_SIZE
- len
< addr
)
95 if (!vma
|| addr
+ len
<= vma
->vm_start
)
97 addr
= DCACHE_ALIGN(vma
->vm_end
- offset
) + offset
;
98 if (addr
< vma
->vm_end
) /* handle wraparound */
103 unsigned long arch_get_unmapped_area(struct file
*filp
, unsigned long addr
,
104 unsigned long len
, unsigned long pgoff
, unsigned long flags
)
108 /* Might want to check for cache aliasing issues for MAP_FIXED case
109 * like ARM or MIPS ??? --BenH.
111 if (flags
& MAP_FIXED
)
114 addr
= TASK_UNMAPPED_BASE
;
117 addr
= get_shared_area(filp
->f_mapping
, addr
, len
, pgoff
);
118 } else if(flags
& MAP_SHARED
) {
119 addr
= get_shared_area(NULL
, addr
, len
, pgoff
);
121 addr
= get_unshared_area(addr
, len
);
126 static unsigned long do_mmap2(unsigned long addr
, unsigned long len
,
127 unsigned long prot
, unsigned long flags
, unsigned long fd
,
130 struct file
* file
= NULL
;
131 unsigned long error
= -EBADF
;
132 if (!(flags
& MAP_ANONYMOUS
)) {
138 flags
&= ~(MAP_EXECUTABLE
| MAP_DENYWRITE
);
140 down_write(¤t
->mm
->mmap_sem
);
141 error
= do_mmap_pgoff(file
, addr
, len
, prot
, flags
, pgoff
);
142 up_write(¤t
->mm
->mmap_sem
);
150 asmlinkage
unsigned long sys_mmap2(unsigned long addr
, unsigned long len
,
151 unsigned long prot
, unsigned long flags
, unsigned long fd
,
154 /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
156 return do_mmap2(addr
, len
, prot
, flags
, fd
, pgoff
>> (PAGE_SHIFT
- 12));
159 asmlinkage
unsigned long sys_mmap(unsigned long addr
, unsigned long len
,
160 unsigned long prot
, unsigned long flags
, unsigned long fd
,
161 unsigned long offset
)
163 if (!(offset
& ~PAGE_MASK
)) {
164 return do_mmap2(addr
, len
, prot
, flags
, fd
, offset
>> PAGE_SHIFT
);
170 /* Fucking broken ABI */
173 asmlinkage
long parisc_truncate64(const char __user
* path
,
174 unsigned int high
, unsigned int low
)
176 return sys_truncate(path
, (long)high
<< 32 | low
);
179 asmlinkage
long parisc_ftruncate64(unsigned int fd
,
180 unsigned int high
, unsigned int low
)
182 return sys_ftruncate(fd
, (long)high
<< 32 | low
);
185 /* stubs for the benefit of the syscall_table since truncate64 and truncate
186 * are identical on LP64 */
187 asmlinkage
long sys_truncate64(const char __user
* path
, unsigned long length
)
189 return sys_truncate(path
, length
);
191 asmlinkage
long sys_ftruncate64(unsigned int fd
, unsigned long length
)
193 return sys_ftruncate(fd
, length
);
195 asmlinkage
long sys_fcntl64(unsigned int fd
, unsigned int cmd
, unsigned long arg
)
197 return sys_fcntl(fd
, cmd
, arg
);
201 asmlinkage
long parisc_truncate64(const char __user
* path
,
202 unsigned int high
, unsigned int low
)
204 return sys_truncate64(path
, (loff_t
)high
<< 32 | low
);
207 asmlinkage
long parisc_ftruncate64(unsigned int fd
,
208 unsigned int high
, unsigned int low
)
210 return sys_ftruncate64(fd
, (loff_t
)high
<< 32 | low
);
214 asmlinkage ssize_t
parisc_pread64(unsigned int fd
, char __user
*buf
, size_t count
,
215 unsigned int high
, unsigned int low
)
217 return sys_pread64(fd
, buf
, count
, (loff_t
)high
<< 32 | low
);
220 asmlinkage ssize_t
parisc_pwrite64(unsigned int fd
, const char __user
*buf
,
221 size_t count
, unsigned int high
, unsigned int low
)
223 return sys_pwrite64(fd
, buf
, count
, (loff_t
)high
<< 32 | low
);
226 asmlinkage ssize_t
parisc_readahead(int fd
, unsigned int high
, unsigned int low
,
229 return sys_readahead(fd
, (loff_t
)high
<< 32 | low
, count
);
232 asmlinkage
long parisc_fadvise64_64(int fd
,
233 unsigned int high_off
, unsigned int low_off
,
234 unsigned int high_len
, unsigned int low_len
, int advice
)
236 return sys_fadvise64_64(fd
, (loff_t
)high_off
<< 32 | low_off
,
237 (loff_t
)high_len
<< 32 | low_len
, advice
);
240 asmlinkage
long parisc_sync_file_range(int fd
,
241 u32 hi_off
, u32 lo_off
, u32 hi_nbytes
, u32 lo_nbytes
,
244 return sys_sync_file_range(fd
, (loff_t
)hi_off
<< 32 | lo_off
,
245 (loff_t
)hi_nbytes
<< 32 | lo_nbytes
, flags
);
248 asmlinkage
unsigned long sys_alloc_hugepages(int key
, unsigned long addr
, unsigned long len
, int prot
, int flag
)
253 asmlinkage
int sys_free_hugepages(unsigned long addr
)
258 long parisc_personality(unsigned long personality
)
262 if (personality(current
->personality
) == PER_LINUX32
263 && personality
== PER_LINUX
)
264 personality
= PER_LINUX32
;
266 err
= sys_personality(personality
);
267 if (err
== PER_LINUX32
)
273 long parisc_newuname(struct new_utsname __user
*name
)
275 int err
= sys_newuname(name
);
278 if (!err
&& personality(current
->personality
) == PER_LINUX32
) {
279 if (__put_user(0, name
->machine
+ 6) ||
280 __put_user(0, name
->machine
+ 7))