// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * PARISC specific syscalls
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
 * Copyright (C) 1999-2014 Helge Deller <deller@gmx.de>
 */

#include <linux/uaccess.h>
#include <asm/elf.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/shm.h>
#include <linux/syscalls.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>

/* we construct an artificial offset for the mapping based on the physical
 * address of the kernel mapping variable */
#define GET_LAST_MMAP(filp)		\
	(filp ? ((unsigned long) filp->f_mapping) >> 8 : 0UL)
#define SET_LAST_MMAP(filp, val)	\
	{ /* nothing */ }

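/*
 * Background note: PA-RISC caches are virtually indexed, so all user
 * mappings of the same object must agree in the address bits below
 * SHM_COLOUR (4 MB on parisc) or they would alias in the cache.
 * Deriving the colour from the kernel address of filp->f_mapping gives
 * every mapping of a given file the same stable colour.
 */
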
static int get_offset(unsigned int last_mmap)
{
	return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}

static unsigned long shared_align_offset(unsigned int last_mmap,
					 unsigned long pgoff)
{
	return (get_offset(last_mmap) + pgoff) << PAGE_SHIFT;
}

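/*
 * Example: with 4 kB pages (PAGE_SHIFT == 12) and the 4 MB SHM_COLOUR,
 * get_offset() is the page index of last_mmap within its 4 MB colour
 * window; adding the file page offset pgoff and converting back to
 * bytes yields the alignment a new mapping must keep modulo SHM_COLOUR.
 */
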
static inline unsigned long COLOR_ALIGN(unsigned long addr,
			 unsigned int last_mmap, unsigned long pgoff)
{
	unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
	unsigned long off = (SHM_COLOUR-1) &
		(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);

	return base + off;
}

/*
 * Top of mmap area (just below the process stack).
 *
 * When called from arch_get_unmapped_area(), rlim_stack will be NULL,
 * indicating that "current" should be used instead of a passed-in
 * value from the exec bprm as done with arch_pick_mmap_layout().
 */
static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
{
	unsigned long stack_base;

	/* Limit stack size - see setup_arg_pages() in fs/exec.c */
	stack_base = rlim_stack ? rlim_stack->rlim_max
				: rlimit_max(RLIMIT_STACK);
	if (stack_base > STACK_SIZE_MAX)
		stack_base = STACK_SIZE_MAX;

	/* Add space for stack randomization. */
	if (current->flags & PF_RANDOMIZE)
		stack_base += (STACK_RND_MASK << PAGE_SHIFT);

	return PAGE_ALIGN(STACK_TOP - stack_base);
}

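/*
 * Example: with a 32 MB stack rlimit and PF_RANDOMIZE clear, the mmap
 * area ends 32 MB below STACK_TOP; with PF_RANDOMIZE set, another
 * STACK_RND_MASK pages are reserved so a randomized stack base still
 * fits below the limit.
 */
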
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	unsigned long task_size = TASK_SIZE;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	if (len > task_size)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

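	/*
	 * For MAP_FIXED the caller's address is used as given; a shared
	 * mapping that cannot keep the file's established cache colour
	 * is refused with -EINVAL rather than silently misplaced.
	 */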
	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
				& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_legacy_base;
	info.high_limit = mmap_upper_limit(NULL);
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
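	/*
	 * vm_unmapped_area() returns an address that agrees with
	 * align_offset in the bits selected by align_mask, preserving
	 * the cache colour, or a negative error code on failure.
	 */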
	addr = vm_unmapped_area(&info);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma, *prev;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_color_align, last_mmap;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;
	last_mmap = GET_LAST_MMAP(filp);

	if (flags & MAP_FIXED) {
		if ((flags & MAP_SHARED) && last_mmap &&
		    (addr - shared_align_offset(last_mmap, pgoff))
			& (SHM_COLOUR - 1))
			return -EINVAL;
		goto found_addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_color_align && last_mmap)
			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma_prev(mm, addr, &prev);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)) &&
		    (!prev || addr >= vm_end_gap(prev)))
			goto found_addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
	info.align_offset = shared_align_offset(last_mmap, pgoff);
	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		goto found_addr;
	VM_BUG_ON(addr != -ENOMEM);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

found_addr:
	if (do_color_align && !last_mmap && !(addr & ~PAGE_MASK))
		SET_LAST_MMAP(filp, addr - (pgoff << PAGE_SHIFT));

	return addr;
}

static int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* parisc stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout.
	 * if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
	 *	return 1;
	 */

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0;

	if (current->flags & PF_RANDOMIZE)
		rnd = get_random_int() & MMAP_RND_MASK;

	return rnd << PAGE_SHIFT;
}

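/*
 * mmap_rnd() above and arch_mmap_rnd() below both return a page-aligned
 * random offset of at most MMAP_RND_MASK pages, so the mmap base can
 * move by up to (MMAP_RND_MASK << PAGE_SHIFT) bytes between runs.
 */
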
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

static unsigned long mmap_legacy_base(void)
{
	return TASK_UNMAPPED_BASE + mmap_rnd();
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_legacy_base = mmap_legacy_base();
	mm->mmap_base = mmap_upper_limit(rlim_stack);

	if (mmap_is_legacy()) {
		mm->mmap_base = mm->mmap_legacy_base;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long pgoff)
{
	/* Make sure the shift for mmap2 is constant (12), no matter what
	   PAGE_SIZE we have. */
	return ksys_mmap_pgoff(addr, len, prot, flags, fd,
			       pgoff >> (PAGE_SHIFT - 12));
}

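/*
 * Example: the mmap2 offset is always passed from userspace in
 * 4096-byte units.  With 4 kB kernel pages the shift above is zero;
 * with e.g. 64 kB pages (PAGE_SHIFT == 16) it divides pgoff by 16,
 * giving the PAGE_SIZE-sized units that ksys_mmap_pgoff() expects.
 */
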
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
		unsigned long prot, unsigned long flags, unsigned long fd,
		unsigned long offset)
{
	if (!(offset & ~PAGE_MASK)) {
		return ksys_mmap_pgoff(addr, len, prot, flags, fd,
					offset >> PAGE_SHIFT);
	} else {
		return -EINVAL;
	}
}

/* Fucking broken ABI */

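/*
 * The wrappers below take 64-bit file offsets and lengths that the
 * 32-bit syscall ABI splits into pairs of 32-bit arguments (high word
 * first); each one simply reassembles them with high << 32 | low.
 */
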
#ifdef CONFIG_64BIT
asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return ksys_truncate(path, (long)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return ksys_ftruncate(fd, (long)high << 32 | low);
}

/* stubs for the benefit of the syscall_table since truncate64 and truncate
 * are identical on LP64 */
asmlinkage long sys_truncate64(const char __user * path, unsigned long length)
{
	return ksys_truncate(path, length);
}
asmlinkage long sys_ftruncate64(unsigned int fd, unsigned long length)
{
	return ksys_ftruncate(fd, length);
}
asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	return sys_fcntl(fd, cmd, arg);
}
#else

asmlinkage long parisc_truncate64(const char __user * path,
					unsigned int high, unsigned int low)
{
	return ksys_truncate(path, (loff_t)high << 32 | low);
}

asmlinkage long parisc_ftruncate64(unsigned int fd,
					unsigned int high, unsigned int low)
{
	return sys_ftruncate64(fd, (loff_t)high << 32 | low);
}
#endif

asmlinkage ssize_t parisc_pread64(unsigned int fd, char __user *buf, size_t count,
					unsigned int high, unsigned int low)
{
	return ksys_pread64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_pwrite64(unsigned int fd, const char __user *buf,
			size_t count, unsigned int high, unsigned int low)
{
	return ksys_pwrite64(fd, buf, count, (loff_t)high << 32 | low);
}

asmlinkage ssize_t parisc_readahead(int fd, unsigned int high, unsigned int low,
				    size_t count)
{
	return ksys_readahead(fd, (loff_t)high << 32 | low, count);
}

asmlinkage long parisc_fadvise64_64(int fd,
			unsigned int high_off, unsigned int low_off,
			unsigned int high_len, unsigned int low_len, int advice)
{
	return ksys_fadvise64_64(fd, (loff_t)high_off << 32 | low_off,
			(loff_t)high_len << 32 | low_len, advice);
}

asmlinkage long parisc_sync_file_range(int fd,
			u32 hi_off, u32 lo_off, u32 hi_nbytes, u32 lo_nbytes,
			unsigned int flags)
{
	return ksys_sync_file_range(fd, (loff_t)hi_off << 32 | lo_off,
			(loff_t)hi_nbytes << 32 | lo_nbytes, flags);
}

asmlinkage long parisc_fallocate(int fd, int mode, u32 offhi, u32 offlo,
				u32 lenhi, u32 lenlo)
{
	return ksys_fallocate(fd, mode, ((u64)offhi << 32) | offlo,
			      ((u64)lenhi << 32) | lenlo);
}

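/*
 * Keep the personality a 32-bit process observes stable: a PER_LINUX32
 * task asking for PER_LINUX stays PER_LINUX32 inside the kernel, and a
 * kernel value of PER_LINUX32 is reported back to userspace as plain
 * PER_LINUX.
 */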
long parisc_personality(unsigned long personality)
{
	long err;

	if (personality(current->personality) == PER_LINUX32
	    && personality(personality) == PER_LINUX)
		personality = (personality & ~PER_MASK) | PER_LINUX32;

	err = sys_personality(personality);
	if (personality(err) == PER_LINUX32)
		err = (err & ~PER_MASK) | PER_LINUX;

	return err;
}