/*
 * PARISC specific syscalls
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
 * Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/uaccess.h>
#include <asm/elf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
/* we construct an artificial offset for the mapping based on the physical
 * address of the kernel mapping variable */
#define GET_LAST_MMAP(filp)		\
	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
#define SET_LAST_MMAP(filp, val)	\
	{ /* nothing */ }
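
/*
 * Illustrative note (an assumption added here, not part of the original
 * comments): filp->f_mapping points at the inode's address_space, which is
 * shared by every struct file opened on the same inode, so GET_LAST_MMAP()
 * yields the same colour token for all openers of a file, e.g.
 * (hypothetical userspace sketch):
 *
 *	int fd1 = open("/tmp/f", O_RDONLY);	// same inode, hence...
 *	int fd2 = open("/tmp/f", O_RDONLY);	// ...same colour token
 *
 * That steers shared mappings of one file to congruent cache colours even
 * across processes. SET_LAST_MMAP() is deliberately a no-op here.
 */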
static int get_offset(unsigned int last_mmap)
{
	return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}
static unsigned long shared_align_offset(unsigned int last_mmap,
					 unsigned long pgoff)
{
	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
}
static inline unsigned long COLOR_ALIGN(unsigned long addr,
			 unsigned int last_mmap, unsigned long pgoff)
{
	unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
	unsigned long off  = (SHM_COLOUR-1) &
		(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);

	return base + off;
}
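
/*
 * Worked example (illustrative, assuming SHM_COLOUR is the parisc 4 MB
 * cache colour granule 0x00400000 and shared_align_offset() is zero):
 *
 *	addr = 0x40123000
 *	base = (0x40123000 + 0x3fffff) & ~0x3fffff = 0x40400000
 *	off  = 0
 *
 * COLOR_ALIGN() rounds the hint up to the next colour boundary and then
 * re-applies the offset the file's earlier mappings used within a granule.
 */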
/*
 * Top of mmap area (just below the process stack).
 */
static unsigned long mmap_upper_limit(void)
{
	unsigned long stack_base;

	/* Limit stack size - see setup_arg_pages() in fs/exec.c */
	stack_base = rlimit_max(RLIMIT_STACK);
	if (stack_base > STACK_SIZE_MAX)
		stack_base = STACK_SIZE_MAX;

	/* Add space for stack randomization. */
	stack_base += (STACK_RND_MASK << PAGE_SHIFT);

	return PAGE_ALIGN(STACK_TOP - stack_base);
}
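
/*
 * Worked example (illustrative figures, assuming an 8 MB RLIMIT_STACK hard
 * limit, 4 KB pages and a hypothetical STACK_RND_MASK of 0x7ff):
 *
 *	stack_base = 8 MB + (0x7ff << 12)	// roughly 16 MB total
 *	upper      = PAGE_ALIGN(STACK_TOP - stack_base)
 *
 * i.e. the mmap area ends about 16 MB below STACK_TOP, leaving room for the
 * stack plus its randomization slack.
 */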
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	unsigned long task_size = TASK_SIZE;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	if (len > task_size)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
				& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_legacy_base;
	info.high_limit = mmap_upper_limit();
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}
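
/*
 * Note on the search above (our summary of vm_unmapped_area() semantics,
 * not from the original comments): with
 * align_mask = PAGE_MASK & (SHM_COLOUR - 1), any address returned satisfies
 *
 *	(addr & align_mask) == (align_offset & align_mask)
 *
 * so every shared mapping of a given file lands at the same offset within a
 * SHM_COLOUR granule, which is what the VIPT cache aliasing rules require.
 */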
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
			& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		goto found_addr;
	VM_BUG_ON(addr != -ENOMEM);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* parisc stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout.
	 * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
	 *	return 1;
	 */

	return sysctl_legacy_va_layout;
}
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0;

	if (current->flags & PF_RANDOMIZE)
		rnd = get_random_int() & MMAP_RND_MASK;

	return rnd << PAGE_SHIFT;
}

unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}
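
/*
 * Example (illustrative mask value, an assumption for this sketch): if
 * MMAP_RND_MASK were 0x3ff, then with 4 KB pages
 *
 *	rnd << PAGE_SHIFT  lies in  [0, 0x3ff000]
 *
 * i.e. up to ~4 MB of page-aligned randomization is added to the mmap base
 * when PF_RANDOMIZE is set.
 */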
static unsigned long mmap_legacy_base(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_legacy_base = mmap_legacy_base();
	mm->mmap_base = mmap_upper_limit();

	if (mmap_is_legacy()) {
		mm->mmap_base = mm->mmap_legacy_base;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
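
/*
 * Illustrative usage (hypothetical shell session):
 *
 *	$ setarch --addr-compat-layout ./app	# ADDR_COMPAT_LAYOUT: forces
 *						# the legacy bottom-up layout
 *
 * Everything else (absent vm.legacy_va_layout=1) gets the top-down layout:
 * mmap_base just below the stack reservation from mmap_upper_limit(), with
 * allocations growing downward.
 */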
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* Make sure the shift for mmap2 is constant (12), no matter what
	   PAGE_SIZE we have. */
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      pgoff >> (PAGE_SHIFT - 12));
}
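
/*
 * Worked example: userspace passes mmap2 offsets in fixed 4 KB units.
 * With 4 KB kernel pages, PAGE_SHIFT - 12 == 0 and pgoff is used as-is;
 * with (hypothetical) 16 KB pages, PAGE_SHIFT - 12 == 2, so e.g.
 *
 *	pgoff      = 8	// 8 * 4 KB  = 32 KB into the file
 *	pgoff >> 2 = 2	// 2 * 16 KB = the same 32 KB
 */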
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags, unsigned long fd,
		unsigned long offset)
{
	if (!(offset & ~PAGE_MASK)) {
		return sys_mmap_pgoff(addr, len, prot, flags, fd,
					offset >> PAGE_SHIFT);
	} else {
		return -EINVAL;
	}
}
/* Fucking broken ABI */
#ifdef CONFIG_64BIT

asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate(path, (long)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate(fd, (long)high << 32 | low);
}

/* stubs for the benefit of the syscall_table since truncate64 and truncate
 * are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
	return sys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
	return sys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return sys_fcntl(fd, cmd, arg);
}

#else
asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return sys_truncate64(path, (loff_t)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}

#endif
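
/*
 * Worked example of the high/low recombination used by these wrappers:
 * a 32-bit caller truncating a file to 5 GiB (0x140000000) passes
 *
 *	high = 0x00000001, low = 0x40000000
 *	(loff_t)high << 32 | low == 0x140000000
 *
 * The cast matters: without it, `high << 32` would be a 32-bit shift of a
 * 32-bit value, which is undefined behaviour in C.
 */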
asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
					unsigned int high, unsigned int low)
{
	return sys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
			size_t count, unsigned int high, unsigned int low)
{
	return sys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
				    size_t count)
{
	return sys_readahead(fd, (loff_t)high << 32 | low, count);
}
asmlinkage long parisc_fadvise64_64(int fd,
			unsigned int high_off, unsigned int low_off,
			unsigned int high_len, unsigned int low_len, int advice)
{
	return sys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
			(loff_t)high_len << 32 | low_len, advice);
}
asmlinkage long parisc_sync_file_range(int fd,
			u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
			unsigned int flags)
{
	return sys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
			(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}
asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
				u32 lenhi, u32 lenlo)
{
	return sys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
			     ((u64)lenhi << 32) | lenlo);
}
long parisc_personality(unsigned long personality)
{
	long err;

	if (personality(current->personality) == PER_LINUX32
	    && personality(personality) == PER_LINUX)
		personality = (personality & ~PER_MASK) | PER_LINUX32;

	err = sys_personality(personality);
	if (personality(err) == PER_LINUX32)
		err = (err & ~PER_MASK) | PER_LINUX;

	return err;
}
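
/*
 * Illustrative userspace view (hypothetical sketch): a task running under
 * PER_LINUX32 that asks for PER_LINUX is silently kept on PER_LINUX32, and
 * the value reported back to it is mangled so it still sees PER_LINUX:
 *
 *	#include <sys/personality.h>
 *	personality(PER_LINUX);			// request "native"
 *	int cur = personality(0xffffffff);	// query only
 *	// cur reads as PER_LINUX even though the kernel kept PER_LINUX32
 */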