/* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
 * linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/perfctr.h>
#include <asm/unistd.h>

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE;
}

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

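/* The hardware VA hole sits between the two mappable halves of the
 * address space, roughly [0x0000080000000000, 0xfffff80000000000).
 * The macros above widen it by 4GB on each side, giving an exclusion
 * window of [0x000007ff00000000, 0xfffff80100000000), so no mapping
 * can even come close to the unmappable region.
 */
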
/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}

/* Does start,end straddle the VA-space hole? */
static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end = VA_EXCLUDE_END;

	if (likely(start < va_exclude_start && end < va_exclude_start))
		return 0;

	if (likely(start >= va_exclude_end && end >= va_exclude_end))
		return 0;

	return 1;
}

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	if (base + off <= addr)
		return base + off;

	return base - off;
}

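/* Both helpers pin the D-cache color of a mapping: the color of a
 * virtual address is its offset within an SHMLBA-sized span, so
 * placing every mapping at color (pgoff << PAGE_SHIFT) & (SHMLBA-1)
 * keeps all mappings of the same file page aliasing to the same
 * cache lines.  COLOUR_ALIGN rounds up to the next address of that
 * color, COLOUR_ALIGN_DOWN rounds down for the topdown search.
 */
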
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;
	int do_color_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

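/* Search cache note: free_area_cache remembers where the last
 * successful search ended, so the common case resumes from there
 * instead of rescanning from TASK_UNMAPPED_BASE.  cached_hole_size
 * tracks the largest gap the scan has skipped over; a request that
 * would fit inside such a gap restarts from the bottom to reuse it.
 */
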
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_color_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base-len;
	if (do_color_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

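/* The do-while bound above doubles as an underflow guard: once
 * vma->vm_start drops below len, the next candidate vm_start - len
 * would wrap below zero, so the scan gives up and falls through to
 * the bottom-up fallback instead.
 */
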
/* Try to align mapping such that we align it as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

	do {
		addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}

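/* The over-allocation trick: asking for len + (align_goal - PAGE_SIZE)
 * bytes guarantees the returned range contains an align_goal aligned
 * address with at least len usable bytes after it, so rounding addr up
 * can never run past the hole that was found.  Only an address is
 * being chosen here; nothing is mapped yet, so the slack costs nothing.
 */
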
/* Essentially the same as PowerPC... */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		if (test_thread_flag(TIF_32BIT))
			random_factor &= ((1 * 1024 * 1024) - 1);
		else
			random_factor = ((random_factor << PAGE_SHIFT) &
					 0xffffffffUL);
	}

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;
		unsigned long gap;

		gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}

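/* Worked example for the topdown branch: with the common 8MB
 * RLIMIT_STACK the gap is clamped up to 128MB, so mmap_base lands
 * 128MB (plus the random offset) below STACK_TOP32, leaving that
 * much room for stack growth; the upper clamp caps the gap at 5/6
 * of the 32-bit task size so a huge rlimit cannot squeeze the mmap
 * area into nothing.
 */
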
asmlinkage unsigned long sparc_brk(unsigned long brk)
{
	/* People could try to be nasty and use ta 0x6d in 32bit programs */
	if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
		return current->mm->brk;

	if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
		return current->mm->brk;

	return sys_brk(brk);
}

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage long sparc_pipe(struct pt_regs *regs)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (error)
		goto out;
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}

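/* SPARC follows the SunOS convention of returning both descriptors
 * in registers: fd[0] comes back as the normal syscall return value
 * and fd[1] is stuffed into the caller's %o1 (saved here as
 * u_regs[UREG_I1]), which is why this wrapper needs pt_regs at all.
 */
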
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */

asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
			unsigned long third, void __user *ptr, long fifth)
{
	int err;

	/* No need for backward compatibility. We can start fresh...
	 * Note: the bound must be SEMTIMEDOP, not SEMCTL, since
	 * SEMTIMEDOP > SEMCTL; a SEMCTL bound would make the
	 * SEMTIMEDOP case below unreachable.
	 */
	if (call <= SEMTIMEDOP) {
		switch (call) {
		case SEMOP:
			err = sys_semtimedop(first, ptr,
					     (unsigned)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = sys_semtimedop(first, ptr, (unsigned)second,
				(const struct timespec __user *) fifth);
			goto out;
		case SEMGET:
			err = sys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL: {
			err = sys_semctl(first, third,
					 (int)second | IPC_64,
					 (union semun) ptr);
			goto out;
		}
		default:
			err = -ENOSYS;
			goto out;
		};
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = sys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = sys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = sys_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		};
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = sys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = sys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = sys_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		};
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}

asmlinkage long sparc64_newuname(struct new_utsname __user *name)
{
	int ret = sys_newuname(name);

	if (current->personality == PER_LINUX32 && !ret) {
		ret = (copy_to_user(name->machine, "sparc\0\0", 8)
		       ? -EFAULT : 0);
	}
	return ret;
}

asmlinkage long sparc64_personality(unsigned long personality)
{
	int ret;

	if (current->personality == PER_LINUX32 &&
	    personality == PER_LINUX)
		personality = PER_LINUX32;
	ret = sys_personality(personality);
	if (ret == PER_LINUX32)
		ret = PER_LINUX;

	return ret;
}

int sparc64_mmap_check(unsigned long addr, unsigned long len,
		       unsigned long flags)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if ((flags & MAP_FIXED) && addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if ((flags & MAP_FIXED) && invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}

/* Linux version of mmap */
asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags, unsigned long fd,
	unsigned long off)
{
	struct file * file = NULL;
	unsigned long retval = -EBADF;

	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	len = PAGE_ALIGN(len);

	down_write(&current->mm->mmap_sem);
	retval = do_mmap(file, addr, len, prot, flags, off);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return retval;
}

asmlinkage long sys64_munmap(unsigned long addr, size_t len)
{
	long ret;

	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	ret = do_munmap(current->mm, addr, len);
	up_write(&current->mm->mmap_sem);
	return ret;
}

extern unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr);

asmlinkage unsigned long sys64_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;

	if (test_thread_flag(TIF_32BIT))
		goto out;
	if (unlikely(new_len >= VA_EXCLUDE_START))
		goto out;
	if (unlikely(invalid_64bit_range(addr, old_len)))
		goto out;

	down_write(&current->mm->mmap_sem);
	if (flags & MREMAP_FIXED) {
		if (invalid_64bit_range(new_addr, new_len))
			goto out_sem;
	} else if (invalid_64bit_range(addr, new_len)) {
		unsigned long map_flags = 0;
		struct file *file = NULL;

		ret = -ENOMEM;
		if (!(flags & MREMAP_MAYMOVE))
			goto out_sem;

		vma = find_vma(current->mm, addr);
		if (vma) {
			if (vma->vm_flags & VM_SHARED)
				map_flags |= MAP_SHARED;
			file = vma->vm_file;
		}

		/* MREMAP_FIXED checked above. */
		new_addr = get_unmapped_area(file, addr, new_len,
					     vma ? vma->vm_pgoff : 0,
					     map_flags);
		ret = new_addr;
		if (new_addr & ~PAGE_MASK)
			goto out_sem;
		flags |= MREMAP_FIXED;
	}
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
out_sem:
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

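/* 32-bit tasks are rejected up front with -EINVAL: they are expected
 * to enter through the separate 32-bit compat mremap path, which
 * applies the 32-bit address limits instead of the VA-hole checks
 * done here.
 */
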
/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	static int count;

	/* Don't make the system unusable, if someone goes stuck */
	if (count++ > 5)
		return -ENOSYS;

	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif

	return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	siginfo_t info;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

asmlinkage long sys_getdomainname(char __user *name, int len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}

asmlinkage long solaris_syscall(struct pt_regs *regs)
{
	static int count;

	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	if (++count <= 5) {
		printk ("For Solaris binary emulation you need solaris module loaded\n");
		show_regs (regs);
	}
	send_sig(SIGSEGV, current, 1);

	return -ENOSYS;
}

#ifndef CONFIG_SUNOS_EMUL
asmlinkage long sunos_syscall(struct pt_regs *regs)
{
	static int count;

	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	if (++count <= 20)
		printk ("SunOS binary emulation not compiled in\n");
	force_sig(SIGSEGV, current);

	return -ENOSYS;
}
#endif

asmlinkage long sys_utrap_install(utrap_entry_t type,
				  utrap_handler_t new_p,
				  utrap_handler_t new_d,
				  utrap_handler_t __user *old_p,
				  utrap_handler_t __user *old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}

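/* utraps[0] is a reference count: clone()d threads share one table,
 * and the kmalloc/memcpy path above gives a thread its own private
 * copy (dropping the old table's count via p[0]--) the first time it
 * installs a handler that differs from the shared one.
 */
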
asmlinkage long sparc_memory_ordering(unsigned long model,
				      struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}

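/* TSTATE.MM is the two-bit memory model field at bits 15:14; the
 * SPARC V9 encodings are TSO=0, PSO=1 and RMO=2, which is why any
 * model value of 3 or more is rejected above.
 */
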
asmlinkage long sys_rt_sigaction(int sig,
				 const struct sigaction __user *act,
				 struct sigaction __user *oact,
				 void __user *restorer,
				 size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}

/* Invoked by rtrap code to update performance counters in
 * user space.
 */
asmlinkage void update_perfctrs(void)
{
	unsigned long pic, tmp;

	read_pic(pic);
	tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
	__put_user(tmp, current_thread_info()->user_cntd0);
	tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
	__put_user(tmp, current_thread_info()->user_cntd1);
	reset_pic();
}

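/* The %pic register packs both hardware counters into one 64-bit
 * value: counter 0 in the low 32 bits and counter 1 in the high 32
 * bits, hence the (unsigned int) truncation for cntd0 and the >> 32
 * for cntd1 whenever the accumulated totals are pushed out to the
 * user's buffers.
 */
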
asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
{
	int err = 0;

	switch(opcode) {
	case PERFCTR_ON:
		current_thread_info()->pcr_reg = arg2;
		current_thread_info()->user_cntd0 = (u64 __user *) arg0;
		current_thread_info()->user_cntd1 = (u64 __user *) arg1;
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		write_pcr(arg2);
		reset_pic();
		set_thread_flag(TIF_PERFCTR);
		break;

	case PERFCTR_OFF:
		err = -EINVAL;
		if (test_thread_flag(TIF_PERFCTR)) {
			current_thread_info()->user_cntd0 =
				current_thread_info()->user_cntd1 = NULL;
			current_thread_info()->pcr_reg = 0;
			write_pcr(0);
			clear_thread_flag(TIF_PERFCTR);
			err = 0;
		}
		break;

	case PERFCTR_READ: {
		unsigned long pic, tmp;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		read_pic(pic);
		tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
		err |= __put_user(tmp, current_thread_info()->user_cntd0);
		tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
		err |= __put_user(tmp, current_thread_info()->user_cntd1);
		reset_pic();
		break;
	}

	case PERFCTR_CLRPIC:
		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		reset_pic();
		break;

	case PERFCTR_SETPCR: {
		u64 __user *user_pcr = (u64 __user *)arg0;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
		write_pcr(current_thread_info()->pcr_reg);
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		reset_pic();
		break;
	}

	case PERFCTR_GETPCR: {
		u64 __user *user_pcr = (u64 __user *)arg0;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
		break;
	}

	default:
		err = -EINVAL;
		break;
	};
	return err;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	long __res;
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
	asm volatile ("t 0x6d\n\t"
		      "sub %%g0, %%o0, %0\n\t"
		      "movcc %%xcc, %%o0, %0\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}

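/* The trap idiom above: "t 0x6d" is the 64-bit Linux syscall trap.
 * On return the kernel signals failure via the carry bit, so the
 * code pessimistically computes -%o0 (turning a positive errno into
 * the usual negative return) and then, if carry is clear (movcc on
 * %xcc), overwrites that with the raw %o0 success value.
 */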