/*
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>
extern char system_call_common[];
#ifdef __powerpc64__
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
	return val;
}
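/*
 * Illustrative example (not from the original source): with MSR_64BIT
 * clear, a 64-bit value is reduced to its low 32 bits, e.g.
 * truncate_if_32bit(msr_without_64bit, 0x123456789abcUL) == 0x56789abcUL;
 * with MSR_64BIT set the value is returned unchanged.
 */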
/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
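/*
 * Illustrative note (encodings assumed, not from the original source):
 * for "bdnz" the BO field is 0b10000, so the CTR is decremented and the
 * branch is taken while CTR != 0; for "blt" BO is 0b01100, so only the CR
 * bit selected by BI is compared against BO's "branch if true" bit.
 */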
static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}
/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if (instr & 0x04000000) {		/* update forms */
			if ((instr>>26) != 47) 		/* stmw is not an update form */
				regs->gpr[ra] = ea;
		}
	}

	return truncate_if_32bit(regs->msr, ea);
}
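/*
 * Illustrative example (not from the original source): for "lwz r3,8(r4)"
 * the low 16 bits of the instruction hold the displacement 8 and RA is 4,
 * so the effective address is regs->gpr[4] + 8; with RA = 0 the
 * displacement is used on its own.
 */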
#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if ((instr & 3) == 1)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64__ */
/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
					int do_update)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra) {
		ea += regs->gpr[ra];
		if (do_update)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
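/*
 * Illustrative examples (not from the original source), on a 64-bit build:
 * max_align(0x1000) == 8, max_align(0x1002) == 2, max_align(0x1001) == 1.
 */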
static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif
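/*
 * Illustrative examples (not from the original source):
 * byterev_2(0x1234) == 0x3412 and byterev_4(0x12345678) == 0x78563412,
 * i.e. the same transformation the lhbrx/lwbrx byte-reversed accesses need.
 */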
static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}
static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;
#ifdef __LITTLE_ENDIAN__
	int len = nb; /* save a copy of the length for byte reversal */
#endif

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
#ifdef __LITTLE_ENDIAN__
	switch (len) {
	case 2:
		*dest = byterev_2(x);
		break;
	case 4:
		*dest = byterev_4(x);
		break;
#ifdef __powerpc64__
	case 8:
		*dest = byterev_8(x);
		break;
#endif
	}
#endif
#ifdef __BIG_ENDIAN__
	*dest = x;
#endif
	return 0;
}
/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}
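/*
 * Illustrative usage sketch (register numbers assumed, not from the
 * original source): emulating "lwz r3,0(r4)" reduces to
 *	err = read_mem(&regs->gpr[3], regs->gpr[4], 4, regs);
 * where err is -EFAULT if the address fails address_ok() or the user
 * access faults, and 0 on success.
 */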
static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}
static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

#ifdef __LITTLE_ENDIAN__
	switch (nb) {
	case 2:
		val = byterev_2(val);
		break;
	case 4:
		val = byterev_4(val);
		break;
#ifdef __powerpc64__
	case 8:
		val = byterev_8(val);
		break;
#endif
	}
#endif
	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}
/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}
#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul;
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}
static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
	}
	return err;
}
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		".section .fixup,\"ax\"\n"		\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		".section .fixup,\"ax\"\n"		\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		".section .fixup,\"ax\"\n"		\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
		: "r" (addr), "i" (-EFAULT), "0" (err))
static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}
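/*
 * Illustrative example (not from the original source): after emulating
 * "andi. r5,r6,0xff" with a zero result, set_cr0() leaves CR0 = 0b001S
 * (EQ set, LT/GT clear), where S is a copy of XER[SO].
 */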
static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}
static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
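/*
 * Illustrative examples (not from the original source): MASK32 builds the
 * PowerPC rotate mask from bit mb to bit me in IBM bit numbering, e.g.
 * MASK32(0, 15) == 0xffff0000 and MASK32(16, 31) == 0x0000ffff; the
 * "+ ((me) >= (mb))" term makes masks with me < mb wrap around.
 */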
/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned long int ea;
	unsigned int cr, mb, me, sh;
	long ival;
	int err;
	unsigned long old_ra, val3;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = truncate_if_32bit(regs->msr, imm);
		return 1;
	case 17:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;
	case 18:	/* b */
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 16:	/* bclr */
		case 528:	/* bcctr */
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;
		case 18:	/* rfid, scary */
			return -1;

		case 150:	/* isync */
			isync();
			goto instr_done;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
			goto instr_done;

		case 854:	/* eieio */
			eieio();
			goto instr_done;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;
	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;
	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;
#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
		break;
#endif
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				break;
			regs->gpr[rd] = regs->msr & MSR_MASK;
			goto instr_done;

		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				break;
			imm = regs->gpr[rd];
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsr that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#ifdef __powerpc64__
		case 178:	/* mtmsrd */
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV and MSR_ME */
			if (regs->msr & MSR_PR)
				break;
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
			imm = (regs->msr & MSR_MASK & ~imm)
				| (regs->gpr[rd] & imm);
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsrd that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#endif
		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;

		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;
		case 339:	/* mfspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case 0x100:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case 0x120:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			}
			break;

		case 467:	/* mtspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case 0x100:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case 0x120:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			}
			break;
/*
 * Compare instructions
 */
		case 0:		/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;
/*
 * Arithmetic instructions
 */
		case 8:		/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:		/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;
/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif
/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */
/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbst");
			if (err)
				return 0;
			goto instr_done;

		case 86:	/* dcbf */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbf");
			if (err)
				return 0;
			goto instr_done;

		case 246:	/* dcbtst */
			if (rb == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetchw((void *) ea);
			}
			goto instr_done;

		case 278:	/* dcbt */
			if (rb == 0) {
				ea = xform_ea(instr, regs, 0);
				prefetch((void *) ea);
			}
			goto instr_done;
		}
		break;
	}
	/*
	 * Following cases are for loads and stores, so bail out
	 * if we're in little-endian mode.
	 */
	if (regs->msr & MSR_LE)
		return 0;

	/*
	 * Save register RA in case it's an update form load or store
	 * and the access faults.
	 */
	old_ra = regs->gpr[ra];
	switch (opcode) {
	case 31:
		u = instr & 0x40;
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "lwarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;

		case 150:	/* stwcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "ldarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;

		case 214:	/* stdcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;
= read_mem(®s
->gpr
[rd
], xform_ea(instr
, regs
, u
),
1379 case 55: /* lwzux */
1380 err
= read_mem(®s
->gpr
[rd
], xform_ea(instr
, regs
, u
),
1385 case 119: /* lbzux */
1386 err
= read_mem(®s
->gpr
[rd
], xform_ea(instr
, regs
, u
),
1390 #ifdef CONFIG_ALTIVEC
1392 case 359: /* lvxl */
1393 if (!(regs
->msr
& MSR_VEC
))
1395 ea
= xform_ea(instr
, regs
, 0);
1396 err
= do_vec_load(rd
, do_lvx
, ea
, regs
);
1399 case 231: /* stvx */
1400 case 487: /* stvxl */
1401 if (!(regs
->msr
& MSR_VEC
))
1403 ea
= xform_ea(instr
, regs
, 0);
1404 err
= do_vec_store(rd
, do_stvx
, ea
, regs
);
1406 #endif /* CONFIG_ALTIVEC */
#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
			goto ldst_done;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
			goto ldst_done;

		case 215:	/* stbx */
		case 247:	/* stbux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
			goto ldst_done;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			goto ldst_done;
#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			if (!err)
				regs->gpr[rd] = (signed short) regs->gpr[rd];
			goto ldst_done;

		case 407:	/* sthx */
		case 439:	/* sthux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
			goto ldst_done;
#ifdef __powerpc64__
		case 532:	/* ldbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
			if (!err)
				regs->gpr[rd] = byterev_8(val);
			goto ldst_done;
#endif

		case 534:	/* lwbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
			if (!err)
				regs->gpr[rd] = byterev_4(val);
			goto ldst_done;
#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfs, ea, 4, regs);
			goto ldst_done;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfd, ea, 8, regs);
			goto ldst_done;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfs, ea, 4, regs);
			goto ldst_done;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfd, ea, 8, regs);
			goto ldst_done;
#endif
#ifdef __powerpc64__
		case 660:	/* stdbrx */
			val = byterev_8(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
			goto ldst_done;
#endif

		case 662:	/* stwbrx */
			val = byterev_4(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
			goto ldst_done;

		case 790:	/* lhbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
			if (!err)
				regs->gpr[rd] = byterev_2(val);
			goto ldst_done;

		case 918:	/* sthbrx */
			val = byterev_2(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
			goto ldst_done;
#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_load(rd, do_lxvd2x, ea, regs);
			goto ldst_done;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_store(rd, do_stxvd2x, ea, regs);
			goto ldst_done;
#endif /* CONFIG_VSX */
		}
		break;
	case 32:	/* lwz */
	case 33:	/* lwzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 36:	/* stw */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 37:	/* stwu */
		val = regs->gpr[rd];
		val3 = dform_ea(instr, regs);
		/*
		 * For PPC32 we always use stwu with r1 to change the stack
		 * pointer. So this emulated store may corrupt the exception
		 * frame; now we have to provide the exception frame
		 * trampoline, which is pushed below the kprobed function
		 * stack. So we only update gpr[1] but don't emulate the real
		 * store operation. We will do the real store safely in the
		 * exception return code by checking this flag.
		 */
		if ((ra == 1) && !(regs->msr & MSR_PR)
			&& (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
#ifdef CONFIG_PPC32
			/*
			 * Check if we would overflow the kernel stack.
			 */
			if (val3 - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
				printk(KERN_CRIT "Can't kprobe this since the kernel stack would overflow.\n");
				err = -EINVAL;
				break;
			}
#endif /* CONFIG_PPC32 */
			/*
			 * Check if we already set since that means we'll
			 * lose the previous value.
			 */
			WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
			set_thread_flag(TIF_EMULATE_STACK_STORE);
			err = 0;
		} else
			err = write_mem(val, val3, 4, regs);
		goto ldst_done;
	case 38:	/* stb */
	case 39:	/* stbu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 1, regs);
		goto ldst_done;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		goto ldst_done;

	case 42:	/* lha */
	case 43:	/* lhau */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		if (!err)
			regs->gpr[rd] = (signed short) regs->gpr[rd];
		goto ldst_done;

	case 44:	/* sth */
	case 45:	/* sthu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 2, regs);
		goto ldst_done;
	case 46:	/* lmw */
		ra = (instr >> 16) & 0x1f;
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		ea = dform_ea(instr, regs);
		do {
			err = read_mem(&regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;

	case 47:	/* stmw */
		ea = dform_ea(instr, regs);
		do {
			err = write_mem(regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;
#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfs, ea, 4, regs);
		goto ldst_done;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfd, ea, 8, regs);
		goto ldst_done;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfs, ea, 4, regs);
		goto ldst_done;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfd, ea, 8, regs);
		goto ldst_done;
#endif
#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		switch (instr & 3) {
		case 0:		/* ld */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 1:		/* ldu */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 2:		/* lwa */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
		}
		break;

	case 62:	/* std[u] */
		val = regs->gpr[rd];
		switch (instr & 3) {
		case 0:		/* std */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		case 1:		/* stdu */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		}
		break;
#endif /* __powerpc64__ */
	}

	return 0;

 ldst_done:
	if (err) {
		regs->gpr[ra] = old_ra;
		return 0;	/* invoke DSI if -EFAULT? */
	}
 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;

 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);
	goto instr_done;
}
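/*
 * Illustrative usage sketch (caller assumed, not from this file): the
 * kprobe and xmon single-step paths use this roughly as
 *	if (emulate_step(regs, instr) > 0)
 *		return;		/* fully emulated, no hardware single-step */
 * falling back to real single-stepping when it returns 0 and refusing to
 * step the instruction when it returns -1.
 */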