/* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
 * linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */
9 #include <linux/config.h>
10 #include <linux/errno.h>
11 #include <linux/types.h>
12 #include <linux/sched.h>
14 #include <linux/file.h>
16 #include <linux/sem.h>
17 #include <linux/msg.h>
18 #include <linux/shm.h>
19 #include <linux/stat.h>
20 #include <linux/mman.h>
21 #include <linux/utsname.h>
22 #include <linux/smp.h>
23 #include <linux/smp_lock.h>
24 #include <linux/slab.h>
25 #include <linux/syscalls.h>
26 #include <linux/ipc.h>
27 #include <linux/personality.h>
29 #include <asm/uaccess.h>
31 #include <asm/utrap.h>
32 #include <asm/perfctr.h>
/* #define DEBUG_UNIMP_SYSCALL */

/* XXX Make this per-binary type, this way we can detect the type of
 * XXX a binary.  Every Sparc executable calls this very early on.
 */
39 asmlinkage
unsigned long sys_getpagesize(void)
/* Round addr up to an SHMLBA boundary, then add the sub-SHMLBA colour
 * of the file offset so that shared mappings of the same page land on
 * the same cache colour (D-cache alias avoidance).
 */
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
48 unsigned long arch_get_unmapped_area(struct file
*filp
, unsigned long addr
, unsigned long len
, unsigned long pgoff
, unsigned long flags
)
50 struct mm_struct
*mm
= current
->mm
;
51 struct vm_area_struct
* vma
;
52 unsigned long task_size
= TASK_SIZE
;
53 unsigned long start_addr
;
56 if (flags
& MAP_FIXED
) {
57 /* We do not accept a shared mapping if it would violate
58 * cache aliasing constraints.
60 if ((flags
& MAP_SHARED
) &&
61 ((addr
- (pgoff
<< PAGE_SHIFT
)) & (SHMLBA
- 1)))
66 if (test_thread_flag(TIF_32BIT
))
67 task_size
= 0xf0000000UL
;
68 if (len
> task_size
|| len
> -PAGE_OFFSET
)
72 if (filp
|| (flags
& MAP_SHARED
))
77 addr
= COLOUR_ALIGN(addr
, pgoff
);
79 addr
= PAGE_ALIGN(addr
);
81 vma
= find_vma(mm
, addr
);
82 if (task_size
- len
>= addr
&&
83 (!vma
|| addr
+ len
<= vma
->vm_start
))
87 start_addr
= addr
= mm
->free_area_cache
;
93 addr
= COLOUR_ALIGN(addr
, pgoff
);
95 addr
= PAGE_ALIGN(addr
);
97 for (vma
= find_vma(mm
, addr
); ; vma
= vma
->vm_next
) {
98 /* At this point: (!vma || addr < vma->vm_end). */
99 if (addr
< PAGE_OFFSET
&& -PAGE_OFFSET
- len
< addr
) {
101 vma
= find_vma(mm
, PAGE_OFFSET
);
103 if (task_size
< addr
) {
104 if (start_addr
!= TASK_UNMAPPED_BASE
) {
105 start_addr
= addr
= TASK_UNMAPPED_BASE
;
110 if (!vma
|| addr
+ len
<= vma
->vm_start
) {
112 * Remember the place where we stopped the search:
114 mm
->free_area_cache
= addr
+ len
;
119 addr
= COLOUR_ALIGN(addr
, pgoff
);
123 /* Try to align mapping such that we align it as much as possible. */
124 unsigned long get_fb_unmapped_area(struct file
*filp
, unsigned long orig_addr
, unsigned long len
, unsigned long pgoff
, unsigned long flags
)
126 unsigned long align_goal
, addr
= -ENOMEM
;
128 if (flags
& MAP_FIXED
) {
129 /* Ok, don't mess with it. */
130 return get_unmapped_area(NULL
, addr
, len
, pgoff
, flags
);
132 flags
&= ~MAP_SHARED
;
134 align_goal
= PAGE_SIZE
;
135 if (len
>= (4UL * 1024 * 1024))
136 align_goal
= (4UL * 1024 * 1024);
137 else if (len
>= (512UL * 1024))
138 align_goal
= (512UL * 1024);
139 else if (len
>= (64UL * 1024))
140 align_goal
= (64UL * 1024);
143 addr
= get_unmapped_area(NULL
, orig_addr
, len
+ (align_goal
- PAGE_SIZE
), pgoff
, flags
);
144 if (!(addr
& ~PAGE_MASK
)) {
145 addr
= (addr
+ (align_goal
- 1UL)) & ~(align_goal
- 1UL);
149 if (align_goal
== (4UL * 1024 * 1024))
150 align_goal
= (512UL * 1024);
151 else if (align_goal
== (512UL * 1024))
152 align_goal
= (64UL * 1024);
154 align_goal
= PAGE_SIZE
;
155 } while ((addr
& ~PAGE_MASK
) && align_goal
> PAGE_SIZE
);
157 /* Mapping is smaller than 64K or larger areas could not
160 if (addr
& ~PAGE_MASK
)
161 addr
= get_unmapped_area(NULL
, orig_addr
, len
, pgoff
, flags
);
166 asmlinkage
unsigned long sparc_brk(unsigned long brk
)
168 /* People could try to be nasty and use ta 0x6d in 32bit programs */
169 if (test_thread_flag(TIF_32BIT
) &&
171 return current
->mm
->brk
;
173 if ((current
->mm
->brk
& PAGE_OFFSET
) != (brk
& PAGE_OFFSET
))
174 return current
->mm
->brk
;
179 * sys_pipe() is the normal C calling standard for creating
180 * a pipe. It's not the way unix traditionally does this, though.
182 asmlinkage
long sparc_pipe(struct pt_regs
*regs
)
190 regs
->u_regs
[UREG_I1
] = fd
[1];
197 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
199 * This is really horribly ugly.
202 asmlinkage
long sys_ipc(unsigned int call
, int first
, unsigned long second
,
203 unsigned long third
, void __user
*ptr
, long fifth
)
207 /* No need for backward compatibility. We can start fresh... */
208 if (call
<= SEMCTL
) {
211 err
= sys_semtimedop(first
, ptr
,
212 (unsigned)second
, NULL
);
215 err
= sys_semtimedop(first
, ptr
, (unsigned)second
,
216 (const struct timespec __user
*) fifth
);
219 err
= sys_semget(first
, (int)second
, (int)third
);
227 if (get_user(fourth
.__pad
,
228 (void __user
* __user
*) ptr
))
230 err
= sys_semctl(first
, (int)second
| IPC_64
,
239 if (call
<= MSGCTL
) {
242 err
= sys_msgsnd(first
, ptr
, (size_t)second
,
246 err
= sys_msgrcv(first
, ptr
, (size_t)second
, fifth
,
250 err
= sys_msgget((key_t
)first
, (int)second
);
253 err
= sys_msgctl(first
, (int)second
| IPC_64
, ptr
);
260 if (call
<= SHMCTL
) {
264 err
= do_shmat(first
, ptr
, (int)second
, &raddr
);
267 (ulong __user
*) third
))
273 err
= sys_shmdt(ptr
);
276 err
= sys_shmget(first
, (size_t)second
, (int)third
);
279 err
= sys_shmctl(first
, (int)second
| IPC_64
, ptr
);
292 asmlinkage
long sparc64_newuname(struct new_utsname __user
*name
)
294 int ret
= sys_newuname(name
);
296 if (current
->personality
== PER_LINUX32
&& !ret
) {
297 ret
= (copy_to_user(name
->machine
, "sparc\0\0", 8)
303 asmlinkage
long sparc64_personality(unsigned long personality
)
307 if (current
->personality
== PER_LINUX32
&&
308 personality
== PER_LINUX
)
309 personality
= PER_LINUX32
;
310 ret
= sys_personality(personality
);
311 if (ret
== PER_LINUX32
)
317 /* Linux version of mmap */
318 asmlinkage
unsigned long sys_mmap(unsigned long addr
, unsigned long len
,
319 unsigned long prot
, unsigned long flags
, unsigned long fd
,
322 struct file
* file
= NULL
;
323 unsigned long retval
= -EBADF
;
325 if (!(flags
& MAP_ANONYMOUS
)) {
330 flags
&= ~(MAP_EXECUTABLE
| MAP_DENYWRITE
);
331 len
= PAGE_ALIGN(len
);
334 if (test_thread_flag(TIF_32BIT
)) {
335 if (len
> 0xf0000000UL
||
336 ((flags
& MAP_FIXED
) && addr
> 0xf0000000UL
- len
))
339 if (len
> -PAGE_OFFSET
||
340 ((flags
& MAP_FIXED
) &&
341 addr
< PAGE_OFFSET
&& addr
+ len
> -PAGE_OFFSET
))
345 down_write(¤t
->mm
->mmap_sem
);
346 retval
= do_mmap(file
, addr
, len
, prot
, flags
, off
);
347 up_write(¤t
->mm
->mmap_sem
);
356 asmlinkage
long sys64_munmap(unsigned long addr
, size_t len
)
360 if (len
> -PAGE_OFFSET
||
361 (addr
< PAGE_OFFSET
&& addr
+ len
> -PAGE_OFFSET
))
363 down_write(¤t
->mm
->mmap_sem
);
364 ret
= do_munmap(current
->mm
, addr
, len
);
365 up_write(¤t
->mm
->mmap_sem
);
369 extern unsigned long do_mremap(unsigned long addr
,
370 unsigned long old_len
, unsigned long new_len
,
371 unsigned long flags
, unsigned long new_addr
);
373 asmlinkage
unsigned long sys64_mremap(unsigned long addr
,
374 unsigned long old_len
, unsigned long new_len
,
375 unsigned long flags
, unsigned long new_addr
)
377 struct vm_area_struct
*vma
;
378 unsigned long ret
= -EINVAL
;
379 if (test_thread_flag(TIF_32BIT
))
381 if (old_len
> -PAGE_OFFSET
|| new_len
> -PAGE_OFFSET
)
383 if (addr
< PAGE_OFFSET
&& addr
+ old_len
> -PAGE_OFFSET
)
385 down_write(¤t
->mm
->mmap_sem
);
386 if (flags
& MREMAP_FIXED
) {
387 if (new_addr
< PAGE_OFFSET
&&
388 new_addr
+ new_len
> -PAGE_OFFSET
)
390 } else if (addr
< PAGE_OFFSET
&& addr
+ new_len
> -PAGE_OFFSET
) {
391 unsigned long map_flags
= 0;
392 struct file
*file
= NULL
;
395 if (!(flags
& MREMAP_MAYMOVE
))
398 vma
= find_vma(current
->mm
, addr
);
400 if (vma
->vm_flags
& VM_SHARED
)
401 map_flags
|= MAP_SHARED
;
405 /* MREMAP_FIXED checked above. */
406 new_addr
= get_unmapped_area(file
, addr
, new_len
,
407 vma
? vma
->vm_pgoff
: 0,
410 if (new_addr
& ~PAGE_MASK
)
412 flags
|= MREMAP_FIXED
;
414 ret
= do_mremap(addr
, old_len
, new_len
, flags
, new_addr
);
416 up_write(¤t
->mm
->mmap_sem
);
421 /* we come to here via sys_nis_syscall so it can setup the regs argument */
422 asmlinkage
unsigned long c_sys_nis_syscall(struct pt_regs
*regs
)
426 /* Don't make the system unusable, if someone goes stuck */
430 printk ("Unimplemented SPARC system call %ld\n",regs
->u_regs
[1]);
431 #ifdef DEBUG_UNIMP_SYSCALL
438 /* #define DEBUG_SPARC_BREAKPOINT */
440 asmlinkage
void sparc_breakpoint(struct pt_regs
*regs
)
444 if (test_thread_flag(TIF_32BIT
)) {
445 regs
->tpc
&= 0xffffffff;
446 regs
->tnpc
&= 0xffffffff;
448 #ifdef DEBUG_SPARC_BREAKPOINT
449 printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs
->tpc
, regs
->tnpc
);
451 info
.si_signo
= SIGTRAP
;
453 info
.si_code
= TRAP_BRKPT
;
454 info
.si_addr
= (void __user
*)regs
->tpc
;
456 force_sig_info(SIGTRAP
, &info
, current
);
457 #ifdef DEBUG_SPARC_BREAKPOINT
458 printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs
->tpc
, regs
->tnpc
);
462 extern void check_pending(int signum
);
464 asmlinkage
long sys_getdomainname(char __user
*name
, int len
)
471 nlen
= strlen(system_utsname
.domainname
) + 1;
475 if (len
> __NEW_UTS_LEN
)
477 if (copy_to_user(name
, system_utsname
.domainname
, len
))
485 asmlinkage
long solaris_syscall(struct pt_regs
*regs
)
489 regs
->tpc
= regs
->tnpc
;
491 if (test_thread_flag(TIF_32BIT
)) {
492 regs
->tpc
&= 0xffffffff;
493 regs
->tnpc
&= 0xffffffff;
496 printk ("For Solaris binary emulation you need solaris module loaded\n");
499 send_sig(SIGSEGV
, current
, 1);
504 #ifndef CONFIG_SUNOS_EMUL
505 asmlinkage
long sunos_syscall(struct pt_regs
*regs
)
509 regs
->tpc
= regs
->tnpc
;
511 if (test_thread_flag(TIF_32BIT
)) {
512 regs
->tpc
&= 0xffffffff;
513 regs
->tnpc
&= 0xffffffff;
516 printk ("SunOS binary emulation not compiled in\n");
517 force_sig(SIGSEGV
, current
);
523 asmlinkage
long sys_utrap_install(utrap_entry_t type
,
524 utrap_handler_t new_p
,
525 utrap_handler_t new_d
,
526 utrap_handler_t __user
*old_p
,
527 utrap_handler_t __user
*old_d
)
529 if (type
< UT_INSTRUCTION_EXCEPTION
|| type
> UT_TRAP_INSTRUCTION_31
)
531 if (new_p
== (utrap_handler_t
)(long)UTH_NOCHANGE
) {
533 if (!current_thread_info()->utraps
) {
534 if (put_user(NULL
, old_p
))
537 if (put_user((utrap_handler_t
)(current_thread_info()->utraps
[type
]), old_p
))
542 if (put_user(NULL
, old_d
))
547 if (!current_thread_info()->utraps
) {
548 current_thread_info()->utraps
=
549 kmalloc((UT_TRAP_INSTRUCTION_31
+1)*sizeof(long), GFP_KERNEL
);
550 if (!current_thread_info()->utraps
)
552 current_thread_info()->utraps
[0] = 1;
553 memset(current_thread_info()->utraps
+1, 0,
554 UT_TRAP_INSTRUCTION_31
*sizeof(long));
556 if ((utrap_handler_t
)current_thread_info()->utraps
[type
] != new_p
&&
557 current_thread_info()->utraps
[0] > 1) {
558 long *p
= current_thread_info()->utraps
;
560 current_thread_info()->utraps
=
561 kmalloc((UT_TRAP_INSTRUCTION_31
+1)*sizeof(long),
563 if (!current_thread_info()->utraps
) {
564 current_thread_info()->utraps
= p
;
568 current_thread_info()->utraps
[0] = 1;
569 memcpy(current_thread_info()->utraps
+1, p
+1,
570 UT_TRAP_INSTRUCTION_31
*sizeof(long));
574 if (put_user((utrap_handler_t
)(current_thread_info()->utraps
[type
]), old_p
))
578 if (put_user(NULL
, old_d
))
581 current_thread_info()->utraps
[type
] = (long)new_p
;
586 long sparc_memory_ordering(unsigned long model
, struct pt_regs
*regs
)
590 regs
->tstate
= (regs
->tstate
& ~TSTATE_MM
) | (model
<< 14);
594 asmlinkage
long sys_rt_sigaction(int sig
,
595 const struct sigaction __user
*act
,
596 struct sigaction __user
*oact
,
597 void __user
*restorer
,
600 struct k_sigaction new_ka
, old_ka
;
603 /* XXX: Don't preclude handling different sized sigset_t's. */
604 if (sigsetsize
!= sizeof(sigset_t
))
608 new_ka
.ka_restorer
= restorer
;
609 if (copy_from_user(&new_ka
.sa
, act
, sizeof(*act
)))
613 ret
= do_sigaction(sig
, act
? &new_ka
: NULL
, oact
? &old_ka
: NULL
);
616 if (copy_to_user(oact
, &old_ka
.sa
, sizeof(*oact
)))
623 /* Invoked by rtrap code to update performance counters in
626 asmlinkage
void update_perfctrs(void)
628 unsigned long pic
, tmp
;
631 tmp
= (current_thread_info()->kernel_cntd0
+= (unsigned int)pic
);
632 __put_user(tmp
, current_thread_info()->user_cntd0
);
633 tmp
= (current_thread_info()->kernel_cntd1
+= (pic
>> 32));
634 __put_user(tmp
, current_thread_info()->user_cntd1
);
638 asmlinkage
long sys_perfctr(int opcode
, unsigned long arg0
, unsigned long arg1
, unsigned long arg2
)
644 current_thread_info()->pcr_reg
= arg2
;
645 current_thread_info()->user_cntd0
= (u64 __user
*) arg0
;
646 current_thread_info()->user_cntd1
= (u64 __user
*) arg1
;
647 current_thread_info()->kernel_cntd0
=
648 current_thread_info()->kernel_cntd1
= 0;
651 set_thread_flag(TIF_PERFCTR
);
656 if (test_thread_flag(TIF_PERFCTR
)) {
657 current_thread_info()->user_cntd0
=
658 current_thread_info()->user_cntd1
= NULL
;
659 current_thread_info()->pcr_reg
= 0;
661 clear_thread_flag(TIF_PERFCTR
);
667 unsigned long pic
, tmp
;
669 if (!test_thread_flag(TIF_PERFCTR
)) {
674 tmp
= (current_thread_info()->kernel_cntd0
+= (unsigned int)pic
);
675 err
|= __put_user(tmp
, current_thread_info()->user_cntd0
);
676 tmp
= (current_thread_info()->kernel_cntd1
+= (pic
>> 32));
677 err
|= __put_user(tmp
, current_thread_info()->user_cntd1
);
683 if (!test_thread_flag(TIF_PERFCTR
)) {
687 current_thread_info()->kernel_cntd0
=
688 current_thread_info()->kernel_cntd1
= 0;
692 case PERFCTR_SETPCR
: {
693 u64 __user
*user_pcr
= (u64 __user
*)arg0
;
695 if (!test_thread_flag(TIF_PERFCTR
)) {
699 err
|= __get_user(current_thread_info()->pcr_reg
, user_pcr
);
700 write_pcr(current_thread_info()->pcr_reg
);
701 current_thread_info()->kernel_cntd0
=
702 current_thread_info()->kernel_cntd1
= 0;
707 case PERFCTR_GETPCR
: {
708 u64 __user
*user_pcr
= (u64 __user
*)arg0
;
710 if (!test_thread_flag(TIF_PERFCTR
)) {
714 err
|= __put_user(current_thread_info()->pcr_reg
, user_pcr
);