/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/export.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/unistd.h>

#include "entry.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE;
}

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

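/* Illustrative arithmetic (not from the original source): with a 4GB
 * guard band on each side of the hardware VA hole, this works out to
 *
 *	VA_EXCLUDE_START = 0x0000080000000000 - 2^32 = 0x000007ff00000000
 *	VA_EXCLUDE_END   = 0xfffff80000000000 + 2^32 = 0xfffff80100000000
 *
 * so user mappings must lie entirely below VA_EXCLUDE_START or entirely
 * at or above VA_EXCLUDE_END.
 */
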
/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end   = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}

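/* Example (hypothetical addresses): a 2GB mapping at 0x000007fe00000000
 * ends at 0x000007fe80000000 and passes; the same length starting at
 * 0x000007fec0000000 would end at 0x000007ff40000000, inside the guard
 * band, and is caught by the last check above.
 */
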
/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOR_ALIGN(unsigned long addr,
					unsigned long pgoff)
{
	unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);
	unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA - 1);

	return base + off;
}

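/* Worked example (hypothetical values; SHMLBA == 0x8000 and 8KB pages are
 * assumed purely for illustration): a hint of addr = 0x10002000 rounds up
 * to base = 0x10008000, and pgoff = 3 contributes off = 0x6000, so the
 * mapping is placed at 0x1000e000.  File page 3 thereby gets the same
 * D-cache color in every such mapping of the file.
 */
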
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
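
	/* Explanatory note (not in the original source): vm_unmapped_area()
	 * returns a negative errno on failure, which is never page aligned;
	 * that is what the "addr & ~PAGE_MASK" tests in this file detect.
	 * The pass above searched only below the VA hole, so retry above it
	 * when the task's address space extends past VA_EXCLUDE_END.
	 */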
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

/* Try to align the mapping as strictly as its size allows. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);
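
	/* Explanatory note (not in the original source): each pass below
	 * over-allocates by (align_goal - PAGE_SIZE) so that an
	 * align_goal-aligned address is guaranteed to exist inside the
	 * window vm_unmapped_area() hands back; the result is then rounded
	 * up to that boundary.  With 8KB pages, for example, the 4MB pass
	 * asks for len + 0x3fe000 bytes.
	 */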
	do {
		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);

/* Essentially the same as PowerPC. */
static unsigned long mmap_rnd(void)
{
	unsigned long rnd = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		unsigned long val = get_random_int();
		if (test_thread_flag(TIF_32BIT))
			rnd = (val % (1UL << (23UL - PAGE_SHIFT)));
		else
			rnd = (val % (1UL << (30UL - PAGE_SHIFT)));
	}
	return rnd << PAGE_SHIFT;
}

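/* Illustrative arithmetic (assuming sparc64's 8KB pages, PAGE_SHIFT == 13,
 * which this file does not spell out): the randomization spans
 * 23 - 13 = 10 bits of page index (an 8MB window) for 32-bit tasks and
 * 30 - 13 = 17 bits (a 1GB window) for 64-bit tasks; the final shift
 * keeps the result page aligned.
 */
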
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = mmap_rnd();
	unsigned long gap;

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlimit(RLIMIT_STACK);
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

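/* Worked example (hypothetical numbers, not from the original source):
 * a 32-bit task with an 8MB stack rlimit gets `gap' raised to the 128MB
 * floor, so mmap_base lands 128MB (minus the random factor) below
 * STACK_TOP32; an unlimited stack rlimit instead selects the bottom-up
 * legacy layout above.
 */
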
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMCTL) {
		switch (call) {
		case SEMOP:
			err = sys_semtimedop(first, ptr,
					     (unsigned)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = sys_semtimedop(first, ptr, (unsigned)second,
				(const struct timespec __user *)
					     (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = sys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL: {
			err = sys_semctl(first, second,
					 (int)third | IPC_64,
					 (unsigned long) ptr);
			goto out;
		}
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = sys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = sys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = sys_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = sys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = sys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = sys_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		}
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}

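/* Explanatory note (not in the original source): below, a 32-bit task
 * that asks for PER_LINUX is kept on PER_LINUX32, and the flag is masked
 * back out of the return value so such a caller still sees PER_LINUX.
 */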
SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	int ret;

	if (personality(current->personality) == PER_LINUX32 &&
	    personality(personality) == PER_LINUX)
		personality |= PER_LINUX32;
	ret = sys_personality(personality);
	if (personality(ret) == PER_LINUX32)
		ret &= ~PER_LINUX32;

	return ret;
}

int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;

	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	return vm_munmap(addr, len);
}

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	if (test_thread_flag(TIF_32BIT))
		return -EINVAL;
	return sys_mremap(addr, old_len, new_len, flags, new_addr);
}

/* We get here via sys_nis_syscall so it can set up the regs argument. */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	static int count;

	/* Don't make the system unusable if someone gets stuck here. */
	if (count++ > 5)
		return -ENOSYS;

	printk("Unimplemented SPARC system call %ld\n", regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs(regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	siginfo_t info;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}

SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
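
	/* Explanatory note (not in the original source): utraps[0] acts as a
	 * reference count, since the table may be shared after fork/clone.
	 * Installing a different handler while that count is above 1
	 * therefore copies the table first, dropping one reference on the
	 * shared original (see the kmalloc path below).
	 */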
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}

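/* Explanatory note (not in the original source): this sets the SPARC V9
 * memory model field of the saved %tstate.  TSTATE holds PSTATE shifted
 * up by 8 bits and PSTATE.MM occupies bits 7:6, hence the shift by 14.
 * The architected models are 0 = TSO, 1 = PSO and 2 = RMO, which is why
 * values >= 3 are rejected.
 */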
asmlinkage long sparc_memory_ordering(unsigned long model,
				      struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}

SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}

asmlinkage long sys_kern_features(void)
{
	return KERN_FEATURE_MIXED_MODE_STACK;
}