/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE;
}

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end   = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

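/* Round addr up to an SHMLBA boundary and add the cache-color offset implied
 * by pgoff, so that shared mappings of the same file offset land on the same
 * D-cache color.
 */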
static inline unsigned long COLOR_ALIGN(unsigned long addr,
					unsigned long pgoff)
{
	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	return base + off;
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

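	/* The search above is confined below the VA hole.  If it failed and
	 * this task's address space extends past the hole, retry in the
	 * region above it.
	 */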
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

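/* Top-down variant, only ever used by 32-bit tasks (see
 * arch_pick_mmap_layout below), so the search limit is fixed at STACK_TOP32.
 */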
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/* Try to align the mapping as strongly as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

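	/* Over-allocate by (align_goal - PAGE_SIZE) so the start address can
	 * be rounded up to align_goal; if that fails, drop down to the next
	 * smaller alignment goal and try again.
	 */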
	do {
		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);

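/* mmap base randomization: mmap_rnd() returns up to 8MB of random offset for
 * 32-bit tasks and up to 1GB for 64-bit tasks, computed in page-sized units.
 */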
/* Essentially the same as PowerPC. */
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		unsigned long val = get_random_int();
		if (test_thread_flag(TIF_32BIT))
			rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
		else
			rnd = (val % (1UL << (30UL-PAGE_SHIFT)));
	}
	return rnd << PAGE_SHIFT;
}

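/* 64-bit tasks, ADDR_COMPAT_LAYOUT personalities, unlimited stacks and the
 * legacy sysctl all get the classic bottom-up layout; other 32-bit tasks get
 * a randomized top-down base derived from the stack rlimit.
 */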
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = mmap_rnd();
	unsigned long gap;

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlimit(RLIMIT_STACK);
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
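	/* Return the read end as the normal syscall result and hand the
	 * write end back in the second return register (%o1, UREG_I1 here).
	 */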
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMCTL) {
		switch (call) {
		case SEMOP:
			err = sys_semtimedop(first, ptr,
					     (unsigned)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = sys_semtimedop(first, ptr, (unsigned)second,
				(const struct timespec __user *)
					     (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = sys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL: {
			err = sys_semctl(first, second,
					 (int)third | IPC_64,
					 (unsigned long) ptr);
			goto out;
		}
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = sys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = sys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = sys_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
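	/* Shared memory calls.  SHMAT is the odd one out: the attach address
	 * is returned indirectly through the user pointer passed as 'third'.
	 */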
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = sys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = sys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = sys_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}

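/* Keep PER_LINUX32 sticky: a 32-bit task asking for PER_LINUX keeps its
 * 32-bit personality, and the flag is stripped from the value reported back.
 */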
SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	int ret;

	if (personality(current->personality) == PER_LINUX32 &&
	    personality(personality) == PER_LINUX)
		personality |= PER_LINUX32;
	ret = sys_personality(personality);
	if (personality(ret) == PER_LINUX32)
		ret &= ~PER_LINUX32;

	return ret;
}

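/* Common mmap argument check: 32-bit tasks must stay below STACK_TOP32,
 * 64-bit tasks must avoid the VA hole.
 */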
int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;

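	/* Reject offsets that wrap around once the page-aligned length is
	 * added, or that are not themselves page aligned.
	 */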
	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	return vm_munmap(addr, len);
}

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	if (test_thread_flag(TIF_32BIT))
		return -EINVAL;
	return sys_mremap(addr, old_len, new_len, flags, new_addr);
}

/* We come here via sys_nis_syscall so that it can set up the regs argument. */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	static int count;

	/* Don't make the system unusable if someone gets stuck here. */
	if (count++ > 5)
		return -ENOSYS;

	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

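/* Software breakpoint trap handler: deliver SIGTRAP/TRAP_BRKPT to the
 * current task.  For 32-bit tasks the PC and nPC are masked to 32 bits first.
 */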
asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	siginfo_t info;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}

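/* Install a per-thread user trap (utrap) handler as defined by the SPARC V9
 * ABI.  Passing UTH_NOCHANGE as the new handler just reports the handlers
 * currently installed.
 */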
SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
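	/* Installing a real handler: allocate the utrap table on first use.
	 * utraps[0] is a reference count, so if the table is still shared
	 * with other threads it is copied before being modified.
	 */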
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}

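/* Select the SPARC V9 memory model (0 = TSO, 1 = PSO, 2 = RMO) by rewriting
 * the MM field of the saved TSTATE.
 */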
asmlinkage long sparc_memory_ordering(unsigned long model,
				      struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}

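/* sparc's rt_sigaction takes an extra 'restorer' argument; it is saved in
 * ka_restorer for the signal-return trampoline.
 */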
SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}

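/* Report optional kernel features to user space. */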
asmlinkage long sys_kern_features(void)
{
	return KERN_FEATURE_MIXED_MODE_STACK;
}