// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PARISC specific syscalls
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
 * Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
 */

#include <linux/uaccess.h>
#include <asm/elf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
/* we construct an artificial offset for the mapping based on the physical
 * address of the kernel mapping variable */
#define GET_LAST_MMAP(filp)		\
	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
#define SET_LAST_MMAP(filp, val)	\
	 { /* nothing */ }
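/*
 * Page index of the recorded mapping within its SHM_COLOUR-sized
 * window; new shared mappings replicate this index so they land on
 * the same cache colour.
 */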
static int get_offset(unsigned int last_mmap)
{
	return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}
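/*
 * Byte offset combining the recorded colour index with the file's
 * page offset; vm_unmapped_area() uses it as the alignment offset
 * for colour-correct placement.
 */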
static unsigned long shared_align_offset(unsigned int last_mmap,
					 unsigned long pgoff)
{
	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
}
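/*
 * Worked example (a sketch; SHM_COLOUR is 4 MB on parisc, and a 4 kB
 * PAGE_SIZE is assumed here): with last_mmap = 0x12345678,
 * get_offset() returns (0x12345678 & 0x3fffff) >> 12 = 0x345, the
 * colour page index.  For pgoff = 2, shared_align_offset() returns
 * (0x345 + 2) << 12 = 0x347000, so the address chosen for a new
 * mapping of page 2 of the same file is congruent to 0x347000
 * modulo 4 MB.
 */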
static inline unsigned long COLOR_ALIGN(unsigned long addr,
			 unsigned int last_mmap, unsigned long pgoff)
{
	unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
	unsigned long off  = (SHM_COLOUR-1) &
		(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);

	return base + off;
}
/*
 * Top of mmap area (just below the process stack).
 */

/*
 * When called from arch_get_unmapped_area(), rlim_stack will be NULL,
 * indicating that "current" should be used instead of a passed-in
 * value from the exec bprm as done with arch_pick_mmap_layout().
 */
static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
{
	unsigned long stack_base;

	/* Limit stack size - see setup_arg_pages() in fs/exec.c */
	stack_base = rlim_stack ? rlim_stack->rlim_max
				: rlimit_max(RLIMIT_STACK);
	if (stack_base > STACK_SIZE_MAX)
		stack_base = STACK_SIZE_MAX;

	/* Add space for stack randomization. */
	if (current->flags & PF_RANDOMIZE)
		stack_base += (STACK_RND_MASK << PAGE_SHIFT);

	return PAGE_ALIGN(STACK_TOP - stack_base);
}
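/*
 * PA-RISC data caches are virtually indexed, so two virtual mappings
 * of the same physical page must be congruent modulo SHM_COLOUR to
 * avoid aliasing.  The allocator below enforces this for file-backed
 * and MAP_SHARED mappings by aligning them to the colour of the last
 * mapping recorded for the file.
 */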
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	unsigned long task_size = TASK_SIZE;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	if (len > task_size)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
				& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_legacy_base;
	info.high_limit = mmap_upper_limit(NULL);
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}
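/*
 * Top-down variant: identical colouring rules, but the search runs
 * from just below mm->mmap_base toward lower addresses, falling back
 * to the bottom-up allocator when that window is exhausted.
 */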
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
			& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		goto found_addr;
	VM_BUG_ON(addr != -ENOMEM);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}
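/*
 * Layout selection: ADDR_COMPAT_LAYOUT or the legacy_va_layout sysctl
 * selects the bottom-up (legacy) layout; otherwise mappings grow down
 * from just below the stack.
 */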
static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* parisc stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout.
	 * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
	 *	return 1;
	 */

	return sysctl_legacy_va_layout;
}
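/*
 * mmap base randomization: when PF_RANDOMIZE is set, a random
 * page-aligned delta bounded by MMAP_RND_MASK is added to the base.
 */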
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0;

	if (current->flags & PF_RANDOMIZE)
		rnd = get_random_int() & MMAP_RND_MASK;

	return rnd << PAGE_SHIFT;
}
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}
static unsigned long mmap_legacy_base(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_legacy_base = mmap_legacy_base();
	mm->mmap_base = mmap_upper_limit(rlim_stack);

	if (mmap_is_legacy()) {
		mm->mmap_base = mm->mmap_legacy_base;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* Make sure the shift for mmap2 is constant (12), no matter what
	   PAGE_SIZE we have. */
	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
			       pgoff >> (PAGE_SHIFT - 12));
}
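/*
 * Example (a sketch, assuming a kernel configured for 16 kB pages,
 * i.e. PAGE_SHIFT == 14): userspace always passes pgoff in 4 kB
 * units, so pgoff >> (14 - 12) divides by four to yield the 16 kB
 * page index.  With 4 kB pages the shift is zero and pgoff is passed
 * through unchanged.
 */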
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags, unsigned long fd,
		unsigned long offset)
{
	if (!(offset & ~PAGE_MASK)) {
		return ksys_mmap_pgoff(addr, len, prot, flags, fd,
					offset >> PAGE_SHIFT);
	} else {
		return -EINVAL;
	}
}
/* Fucking broken ABI */
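/*
 * The 32-bit ABI passes 64-bit file offsets in two 32-bit argument
 * slots, so each wrapper below reassembles the value from its high
 * and low halves.  A sketch (hypothetical call, not from this file)
 * of how userspace might hand a 5 GB length to truncate64():
 *
 *	loff_t len = 0x140000000LL;
 *	syscall(__NR_truncate64, path,
 *		(u32)(len >> 32),	// high word: 0x1
 *		(u32)len);		// low word: 0x40000000
 */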
#ifdef CONFIG_64BIT

asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return ksys_truncate(path, (long)high << 32 | low);
}
asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return ksys_ftruncate(fd, (long)high << 32 | low);
}
/* stubs for the benefit of the syscall_table since truncate64 and truncate
 * are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
	return ksys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
	return ksys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return sys_fcntl(fd, cmd, arg);
}
#else

asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return ksys_truncate(path, (loff_t)high << 32 | low);
}
asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}
#endif
asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
					unsigned int high, unsigned int low)
{
	return ksys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
			size_t count, unsigned int high, unsigned int low)
{
	return ksys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}
asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
				    size_t count)
{
	return ksys_readahead(fd, (loff_t)high << 32 | low, count);
}
asmlinkage long parisc_fadvise64_64(int fd,
			unsigned int high_off, unsigned int low_off,
			unsigned int high_len, unsigned int low_len, int advice)
{
	return ksys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
			(loff_t)high_len << 32 | low_len, advice);
}
asmlinkage long parisc_sync_file_range(int fd,
			u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
			unsigned int flags)
{
	return ksys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
			(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}
asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
				u32 lenhi, u32 lenlo)
{
	return ksys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
			      ((u64)lenhi << 32) | lenlo);
}
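/*
 * A 32-bit task running with PER_LINUX32 must keep that personality
 * across a personality(PER_LINUX) call, so PER_LINUX is mapped to
 * PER_LINUX32 on entry and back to PER_LINUX in the returned value.
 */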
long parisc_personality(unsigned long personality)
{
	long err;

	if (personality(current->personality) == PER_LINUX32
	    && personality(personality) == PER_LINUX)
		personality = (personality & ~PER_MASK) | PER_LINUX32;

	err = sys_personality(personality);
	if (personality(err) == PER_LINUX32)
		err = (err & ~PER_MASK) | PER_LINUX;

	return err;
}