/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/perfctr.h>
#include <asm/unistd.h>
/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
	return PAGE_SIZE;
}
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
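/* On the affected CPUs only the bottom and top 2^43 bytes of the
 * 64-bit space are mappable; VA_EXCLUDE_START/END bracket the
 * unimplemented middle plus a 4GB guard band on either side.
 */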
/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end = VA_EXCLUDE_END;

	if (unlikely(len >= va_exclude_start))
		return 1;

	if (unlikely((addr + len) < addr))
		return 1;

	if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
		     ((addr + len) >= va_exclude_start &&
		      (addr + len) < va_exclude_end)))
		return 1;

	return 0;
}
/* Does start,end straddle the VA-space hole? */
static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
{
	unsigned long va_exclude_start, va_exclude_end;

	va_exclude_start = VA_EXCLUDE_START;
	va_exclude_end = VA_EXCLUDE_END;

	if (likely(start < va_exclude_start && end < va_exclude_start))
		return 0;

	if (likely(start >= va_exclude_end && end >= va_exclude_end))
		return 0;

	return 1;
}
/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	return base + off;
}
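/* Worked example (illustrative values): with 8KB pages (PAGE_SHIFT
 * = 13) and SHMLBA = 0x4000, a request of addr = 0x10123000 and
 * pgoff = 3 gives base = 0x10124000 and off = (3 << 13) & 0x3fff =
 * 0x2000, so the mapping is placed at 0x10126000, an address with
 * the same D-cache colour as the file offset being mapped.
 */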
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
					      unsigned long pgoff)
{
	unsigned long base = addr & ~(SHMLBA-1);
	unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

	if (base + off <= addr)
		return base + off;

	return base - off;
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;
	int do_color_align;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	if (do_color_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
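/* Note that the scan above can never return an address inside the
 * excluded region: the moment a candidate window would cross
 * VA_EXCLUDE_START, the search jumps to VA_EXCLUDE_END and resumes
 * from the upper half of the address space.
 */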
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long task_size = STACK_TOP32;
	unsigned long addr = addr0;
	int do_color_align;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > task_size))
		return -ENOMEM;

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;
	if (do_color_align) {
		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

		addr = base + len;
	}

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = mm->mmap_base-len;
	if (do_color_align)
		addr = COLOUR_ALIGN_DOWN(addr, pgoff);

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
/* Try to align mapping such that we align it as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	unsigned long align_goal, addr = -ENOMEM;
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;

	if (flags & MAP_FIXED) {
		/* Ok, don't mess with it. */
		return get_area(NULL, orig_addr, len, pgoff, flags);
	}
	flags &= ~MAP_SHARED;

	align_goal = PAGE_SIZE;
	if (len >= (4UL * 1024 * 1024))
		align_goal = (4UL * 1024 * 1024);
	else if (len >= (512UL * 1024))
		align_goal = (512UL * 1024);
	else if (len >= (64UL * 1024))
		align_goal = (64UL * 1024);

	do {
		addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
			break;
		}

		if (align_goal == (4UL * 1024 * 1024))
			align_goal = (512UL * 1024);
		else if (align_goal == (512UL * 1024))
			align_goal = (64UL * 1024);
		else
			align_goal = PAGE_SIZE;
	} while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

	/* Mapping is smaller than 64K or larger areas could not
	 * be obtained.
	 */
	if (addr & ~PAGE_MASK)
		addr = get_area(NULL, orig_addr, len, pgoff, flags);

	return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);
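/* The loop over-allocates by (align_goal - PAGE_SIZE) so that an
 * align_goal-aligned address is guaranteed to exist within the area
 * returned, then rounds up to it.  For example, a 5MB request first
 * tries for 4MB alignment and steps down through 512K and 64K before
 * settling for plain page alignment.
 */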
/* Essentially the same as PowerPC... */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;
	unsigned long gap;

	if (current->flags & PF_RANDOMIZE) {
		random_factor = get_random_int();
		if (test_thread_flag(TIF_32BIT))
			random_factor &= ((1 * 1024 * 1024) - 1);
		else
			random_factor = ((random_factor << PAGE_SHIFT) &
					 0xffffffffUL);
	}

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	gap = rlimit(RLIMIT_STACK);
	if (!test_thread_flag(TIF_32BIT) ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    gap == RLIM_INFINITY ||
	    sysctl_legacy_va_layout) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		/* We know it's 32-bit */
		unsigned long task_size = STACK_TOP32;

		if (gap < 128 * 1024 * 1024)
			gap = 128 * 1024 * 1024;
		if (gap > (task_size / 6 * 5))
			gap = (task_size / 6 * 5);

		mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
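/* For example, a 32-bit task with the common 8MB stack rlimit takes
 * the top-down branch: gap is raised to the 128MB minimum, so
 * mmap_base lands at PAGE_ALIGN(STACK_TOP32 - 128MB - random_factor)
 * and new mappings then grow downward from there.
 */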
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
{
	int fd[2];
	int error;

	error = do_pipe_flags(fd, 0);
	if (error)
		goto out;
	regs->u_regs[UREG_I1] = fd[1];
	error = fd[0];
out:
	return error;
}
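/* SPARC pipe(2) returns both descriptors in registers: fd[0] as the
 * ordinary return value in %o0 and fd[1] via the store to
 * u_regs[UREG_I1] above, which becomes the caller's %o1.
 */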
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
		unsigned long, third, void __user *, ptr, long, fifth)
{
	long err;

	/* No need for backward compatibility. We can start fresh... */
	if (call <= SEMCTL) {
		switch (call) {
		case SEMOP:
			err = sys_semtimedop(first, ptr,
					     (unsigned)second, NULL);
			goto out;
		case SEMTIMEDOP:
			err = sys_semtimedop(first, ptr, (unsigned)second,
					     (const struct timespec __user *)
					     (unsigned long) fifth);
			goto out;
		case SEMGET:
			err = sys_semget(first, (int)second, (int)third);
			goto out;
		case SEMCTL:
			err = sys_semctl(first, second,
					 (int)third | IPC_64,
					 (union semun) ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		};
	}
	if (call <= MSGCTL) {
		switch (call) {
		case MSGSND:
			err = sys_msgsnd(first, ptr, (size_t)second,
					 (int)third);
			goto out;
		case MSGRCV:
			err = sys_msgrcv(first, ptr, (size_t)second, fifth,
					 (int)third);
			goto out;
		case MSGGET:
			err = sys_msgget((key_t)first, (int)second);
			goto out;
		case MSGCTL:
			err = sys_msgctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		};
	}
	if (call <= SHMCTL) {
		switch (call) {
		case SHMAT: {
			ulong raddr;
			err = do_shmat(first, ptr, (int)second, &raddr);
			if (!err) {
				if (put_user(raddr,
					     (ulong __user *) third))
					err = -EFAULT;
			}
			goto out;
		}
		case SHMDT:
			err = sys_shmdt(ptr);
			goto out;
		case SHMGET:
			err = sys_shmget(first, (size_t)second, (int)third);
			goto out;
		case SHMCTL:
			err = sys_shmctl(first, (int)second | IPC_64, ptr);
			goto out;
		default:
			err = -ENOSYS;
			goto out;
		};
	} else {
		err = -ENOSYS;
	}
out:
	return err;
}
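/* Example of the demultiplexing (libc-side details may vary): a
 * semop(semid, sops, nsops) wrapper enters with call = SEMOP,
 * first = semid, second = nsops and ptr = sops, which the SEMOP case
 * above forwards to sys_semtimedop() with a NULL timeout.
 */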
SYSCALL_DEFINE1(sparc64_newuname, struct new_utsname __user *, name)
{
	int ret = sys_newuname(name);

	if (current->personality == PER_LINUX32 && !ret) {
		ret = (copy_to_user(name->machine, "sparc\0\0", 8)
		       ? -EFAULT : 0);
	}
	return ret;
}
SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
	int ret;

	if (current->personality == PER_LINUX32 &&
	    personality == PER_LINUX)
		personality = PER_LINUX32;
	ret = sys_personality(personality);
	if (ret == PER_LINUX32)
		ret = PER_LINUX;

	return ret;
}
int sparc_mmap_check(unsigned long addr, unsigned long len)
{
	if (test_thread_flag(TIF_32BIT)) {
		if (len >= STACK_TOP32)
			return -EINVAL;

		if (addr > STACK_TOP32 - len)
			return -EINVAL;
	} else {
		if (len >= VA_EXCLUDE_START)
			return -EINVAL;

		if (invalid_64bit_range(addr, len))
			return -EINVAL;
	}

	return 0;
}
/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags, unsigned long, fd,
		unsigned long, off)
{
	unsigned long retval = -EINVAL;

	if ((off + PAGE_ALIGN(len)) < off)
		goto out;
	if (off & ~PAGE_MASK)
		goto out;
	retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return retval;
}
SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
	long ret;

	if (invalid_64bit_range(addr, len))
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	ret = do_munmap(current->mm, addr, len);
	up_write(&current->mm->mmap_sem);
	return ret;
}
extern unsigned long do_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr);

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret = -EINVAL;

	if (test_thread_flag(TIF_32BIT))
		goto out;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
	static int count;

	/* Don't make the system unusable, if someone goes stuck */
	if (count++ > 5)
		return -ENOSYS;

	printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
	show_regs (regs);
#endif

	return -ENOSYS;
}
/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
	siginfo_t info;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
	printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}
extern void check_pending(int signum);

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
	int nlen, err;

	if (len < 0)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(utsname()->domainname) + 1;
	err = -EINVAL;
	if (nlen > len)
		goto out;

	err = -EFAULT;
	if (!copy_to_user(name, utsname()->domainname, nlen))
		err = 0;

out:
	up_read(&uts_sem);
	return err;
}
SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
		utrap_handler_t, new_p, utrap_handler_t, new_d,
		utrap_handler_t __user *, old_p,
		utrap_handler_t __user *, old_d)
{
	if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
		return -EINVAL;
	if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
		if (old_p) {
			if (!current_thread_info()->utraps) {
				if (put_user(NULL, old_p))
					return -EFAULT;
			} else {
				if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
					return -EFAULT;
			}
		}
		if (old_d) {
			if (put_user(NULL, old_d))
				return -EFAULT;
		}
		return 0;
	}
	if (!current_thread_info()->utraps) {
		current_thread_info()->utraps =
			kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
		if (!current_thread_info()->utraps)
			return -ENOMEM;
		current_thread_info()->utraps[0] = 1;
	} else {
		if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
		    current_thread_info()->utraps[0] > 1) {
			unsigned long *p = current_thread_info()->utraps;

			current_thread_info()->utraps =
				kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
					GFP_KERNEL);
			if (!current_thread_info()->utraps) {
				current_thread_info()->utraps = p;
				return -ENOMEM;
			}
			p[0]--;
			current_thread_info()->utraps[0] = 1;
			memcpy(current_thread_info()->utraps+1, p+1,
			       UT_TRAP_INSTRUCTION_31*sizeof(long));
		}
	}
	if (old_p) {
		if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
			return -EFAULT;
	}
	if (old_d) {
		if (put_user(NULL, old_d))
			return -EFAULT;
	}
	current_thread_info()->utraps[type] = (long)new_p;

	return 0;
}
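/* utraps[0] doubles as a reference count.  Threads created with
 * clone() share the table, so a thread that wants to change an entry
 * while the count is above one must first duplicate the table (the
 * kmalloc()/memcpy() path above) to keep the change thread-local.
 */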
asmlinkage long sparc_memory_ordering(unsigned long model,
				      struct pt_regs *regs)
{
	if (model >= 3)
		return -EINVAL;
	regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
	return 0;
}
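/* The two-bit TSTATE.MM field at bit 14 selects the SPARC V9 memory
 * model: 0 = TSO (total store order), 1 = PSO (partial store order),
 * 2 = RMO (relaxed memory order); hence the model >= 3 check.
 */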
SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
		struct sigaction __user *, oact, void __user *, restorer,
		size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act) {
		new_ka.ka_restorer = restorer;
		if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
			return -EFAULT;
	}

	return ret;
}
/* Invoked by rtrap code to update performance counters in
 * user space.
 */
asmlinkage void update_perfctrs(void)
{
	unsigned long pic, tmp;

	read_pic(pic);
	tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
	__put_user(tmp, current_thread_info()->user_cntd0);
	tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
	__put_user(tmp, current_thread_info()->user_cntd1);
	reset_pic();
}
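/* The UltraSPARC PIC register packs both hardware counters into one
 * 64-bit value, counter 0 in the low half and counter 1 in the high
 * half, which is why the code above accumulates (unsigned int)pic
 * and (pic >> 32) separately.
 */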
SYSCALL_DEFINE4(perfctr, int, opcode, unsigned long, arg0,
		unsigned long, arg1, unsigned long, arg2)
{
	int err = 0;

	switch(opcode) {
	case PERFCTR_ON:
		current_thread_info()->pcr_reg = arg2;
		current_thread_info()->user_cntd0 = (u64 __user *) arg0;
		current_thread_info()->user_cntd1 = (u64 __user *) arg1;
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		write_pcr(arg2);
		reset_pic();
		set_thread_flag(TIF_PERFCTR);
		break;

	case PERFCTR_OFF:
		err = -EINVAL;
		if (test_thread_flag(TIF_PERFCTR)) {
			current_thread_info()->user_cntd0 =
				current_thread_info()->user_cntd1 = NULL;
			current_thread_info()->pcr_reg = 0;
			write_pcr(0);
			clear_thread_flag(TIF_PERFCTR);
			err = 0;
		}
		break;

	case PERFCTR_READ: {
		unsigned long pic, tmp;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		read_pic(pic);
		tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
		err |= __put_user(tmp, current_thread_info()->user_cntd0);
		tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
		err |= __put_user(tmp, current_thread_info()->user_cntd1);
		reset_pic();
		break;
	}

	case PERFCTR_CLRPIC:
		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		reset_pic();
		break;

	case PERFCTR_SETPCR: {
		u64 __user *user_pcr = (u64 __user *)arg0;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
		write_pcr(current_thread_info()->pcr_reg);
		current_thread_info()->kernel_cntd0 =
			current_thread_info()->kernel_cntd1 = 0;
		reset_pic();
		break;
	}

	case PERFCTR_GETPCR: {
		u64 __user *user_pcr = (u64 __user *)arg0;

		if (!test_thread_flag(TIF_PERFCTR)) {
			err = -EINVAL;
			break;
		}
		err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
		break;
	}

	default:
		err = -EINVAL;
		break;
	};
	return err;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	long __res;
	register long __g1 __asm__ ("g1") = __NR_execve;
	register long __o0 __asm__ ("o0") = (long)(filename);
	register long __o1 __asm__ ("o1") = (long)(argv);
	register long __o2 __asm__ ("o2") = (long)(envp);
	asm volatile ("t 0x6d\n\t"
		      "sub %%g0, %%o0, %0\n\t"
		      "movcc %%xcc, %%o0, %0\n\t"
		      : "=r" (__res), "=&r" (__o0)
		      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
		      : "cc");
	return __res;
}
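/* "t 0x6d" is the 64-bit Linux syscall trap.  The kernel flags an
 * error via the condition codes: the sub negates %o0 to produce
 * -errno, and the movcc (executed when the carry bit of %xcc is
 * clear, i.e. success) overwrites __res with the untouched return
 * value instead.
 */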