/*
 * PARISC specific syscalls
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
 * Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <asm/uaccess.h>
#include <asm/elf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>

/* we construct an artificial offset for the mapping based on the physical
 * address of the kernel mapping variable */
#define GET_LAST_MMAP(filp)		\
	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
#define SET_LAST_MMAP(filp, val)	\
	{ /* nothing */ }

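/*
 * Why colouring matters here: PA-RISC caches are virtually indexed,
 * so two mappings of the same data must fall on the same cache
 * colour (the address bits below SHM_COLOUR, a 4 MB window on
 * parisc) or stores through one mapping may not be visible through
 * the other. The helpers below bias address selection so shared
 * mappings of a file keep compatible colours.
 */
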
static int get_offset(unsigned int last_mmap)
{
	return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}

static unsigned long shared_align_offset(unsigned int last_mmap,
					 unsigned long pgoff)
{
	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
}

static inline unsigned long COLOR_ALIGN(unsigned long addr,
			 unsigned int last_mmap, unsigned long pgoff)
{
	unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
	unsigned long off  = (SHM_COLOUR-1) &
		(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);

	return base + off;
}

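/*
 * Example (illustrative values): with SHM_COLOUR = 4 MB, an addr of
 * 0x12345678 gives base = 0x12400000 (rounded up to the next 4 MB
 * boundary); off is the colour offset derived from the file's last
 * mapping and pgoff, masked into the 4 MB window, so the result
 * keeps the caller's hint but with a compatible colour.
 */
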
/*
 * Top of mmap area (just below the process stack).
 */

static unsigned long mmap_upper_limit(void)
{
	unsigned long stack_base;

	/* Limit stack size - see setup_arg_pages() in fs/exec.c */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > STACK_SIZE_MAX)
		stack_base = STACK_SIZE_MAX;

	/* Add space for stack randomization. */
	stack_base += (STACK_RND_MASK << PAGE_SHIFT);

	return PAGE_ALIGN(STACK_TOP - stack_base);
}

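/*
 * Illustration with made-up numbers: if the hard stack limit were
 * 1 GB and STACK_RND_MASK were 0x3ff with 4 KB pages, the reserved
 * randomization head-room would be 0x3ff << 12 (about 4 MB), so the
 * mmap area would end roughly 1 GB + 4 MB below STACK_TOP.
 */
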
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	if (len > task_size)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
				& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			goto found_addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_legacy_base;
	info.high_limit = mmap_upper_limit();
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}

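/*
 * Note on the search parameters above: align_mask constrains the
 * candidate address to the 4 MB colour window only when the file
 * already has a known mapping (last_mmap != 0), and align_offset
 * selects the slot inside that window whose colour matches the
 * existing mapping shifted by pgoff, so the address returned by
 * vm_unmapped_area() aliases cleanly with what is already mapped.
 */
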
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

#ifdef CONFIG_64BIT
	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));
#endif

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
			& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			goto found_addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		goto found_addr;
	VM_BUG_ON(addr != -ENOMEM);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* parisc stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout.
	 * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
	 *	return 1;
	 */

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0;

	/*
	 *  8 bits of randomness in 32bit mmaps, 20 address space bits
	 * 28 bits of randomness in 64bit mmaps, 40 address space bits
	 */
	if (current->flags & PF_RANDOMIZE) {
		if (is_32bit_task())
			rnd = get_random_int() % (1<<8);
		else
			rnd = get_random_int() % (1<<28);
	}
	return rnd << PAGE_SHIFT;
}

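/*
 * Worked numbers for the comment above, assuming 4 KB pages
 * (PAGE_SHIFT = 12): 8 random page bits span 2^8 * 4 KB = 1 MB
 * (20 address bits); 28 random page bits span 2^28 * 4 KB = 1 TB
 * (40 address bits).
 */
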
static unsigned long mmap_legacy_base(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_legacy_base = mmap_legacy_base();
	mm->mmap_base = mmap_upper_limit();

	if (mmap_is_legacy()) {
		mm->mmap_base = mm->mmap_legacy_base;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

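/*
 * Layout summary: the legacy layout allocates bottom-up starting at
 * TASK_UNMAPPED_BASE plus a random offset, while the default layout
 * allocates top-down starting just below the stack reservation
 * computed by mmap_upper_limit().
 */
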
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* Make sure the shift for mmap2 is constant (12), no matter what
	   PAGE_SIZE we have. */
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      pgoff >> (PAGE_SHIFT - 12));
}

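/*
 * Example: mmap2 offsets are always in 4 KB units. With 4 KB kernel
 * pages the shift is PAGE_SHIFT - 12 = 0 and pgoff passes through
 * unchanged; with (hypothetical) 64 KB pages, pgoff >> 4 would turn
 * the 4 KB units into 64 KB page numbers.
 */
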
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags, unsigned long fd,
		unsigned long offset)
{
	if (!(offset & ~PAGE_MASK)) {
		/* offset is page-aligned: convert it to a page number */
		return sys_mmap_pgoff(addr, len, prot, flags, fd,
					offset >> PAGE_SHIFT);
	} else {
		return -EINVAL;
	}
}

/* Broken ABI: 64-bit arguments are passed to these syscalls split
 * across two 32-bit registers, so each one needs a wrapper that
 * reassembles the halves. */

#ifdef CONFIG_64BIT
asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate(path, (long)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate(fd, (long)high << 32 | low);
}

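/*
 * Example of the split-register convention: a request to truncate a
 * file to 6 GB (0x180000000) arrives as high = 0x1 and
 * low = 0x80000000; (long)high << 32 | low reassembles the 64-bit
 * length before calling the real syscall.
 */
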
/* stubs for the benefit of the syscall_table since truncate64 and truncate
 * are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
	return sys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
	return sys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return sys_fcntl(fd, cmd, arg);
}

#else

asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate64(path, (loff_t)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}
#endif

asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
					unsigned int high, unsigned int low)
{
	return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
			size_t count, unsigned int high, unsigned int low)
{
	return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
				    size_t count)
{
	return sys_readahead(fd, (loff_t)high << 32 | low, count);
}

asmlinkage long parisc_fadvise64_64(int fd,
			unsigned int high_off, unsigned int low_off,
			unsigned int high_len, unsigned int low_len, int advice)
{
	return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
			(loff_t)high_len << 32 | low_len, advice);
}

asmlinkage long parisc_sync_file_range(int fd,
			u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
			unsigned int flags)
{
	return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
			(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}

asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
				u32 lenhi, u32 lenlo)
{
	return sys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
			     ((u64)lenhi << 32) | lenlo);
}

/* Unimplemented stubs: huge page allocation is not supported here. */
asmlinkage unsigned long sys_alloc_hugepages(int key, unsigned long addr,
		unsigned long len, int prot, int flag)
{
	return -ENOMEM;
}

asmlinkage int sys_free_hugepages(unsigned long addr)
{
	return -EINVAL;
}

long parisc_personality(unsigned long personality)
{
	long err;

	if (personality(current->personality) == PER_LINUX32
	    && personality(personality) == PER_LINUX)
		personality = (personality & ~PER_MASK) | PER_LINUX32;

	err = sys_personality(personality);
	if (personality(err) == PER_LINUX32)
		err = (err & ~PER_MASK) | PER_LINUX;

	return err;
}