/*
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>

extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
/*
 * Emulate the truncation of 64-bit values in 32-bit mode.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
	return val;
}
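
/*
 * Illustrative example (added comment, not in the original source): in
 * 32-bit mode truncate_if_32bit(msr, 0x123456789abcdef0UL) returns
 * 0x9abcdef0UL, matching how the CPU ignores the upper 32 bits of NIP
 * and effective addresses when MSR_64BIT is clear.
 */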
/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
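
/*
 * Worked example (added comment, not in the original source): bdnz encodes
 * BO = 0b10000, so bit 0x4 is clear and CTR is decremented; the branch is
 * taken while the decremented CTR is non-zero, and the CR test is skipped
 * because BO & 0x10 is set.
 */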
static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}
/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if (instr & 0x04000000) {		/* update forms */
			if ((instr >> 26) != 47)	/* stmw is not an update form */
				regs->gpr[ra] = ea;
		}
	}

	return truncate_if_32bit(regs->msr, ea);
}
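
/*
 * Illustrative example (added comment, not in the original source): for
 * "lwz r5,-8(r4)" this yields ea = regs->gpr[4] - 8; an RA field of 0
 * means a literal zero base rather than the contents of r0.
 */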
#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra) {
		ea += regs->gpr[ra];
		if ((instr & 3) == 1)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64__ */
/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs,
					int do_update)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra) {
		ea += regs->gpr[ra];
		if (do_update)		/* update forms */
			regs->gpr[ra] = ea;
	}

	return truncate_if_32bit(regs->msr, ea);
}
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
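
/*
 * Worked example (added comment, not in the original source):
 * max_align(0x1006) = (0x1006 | 8) & -(0x1006 | 8) = 2, so a transfer at
 * that address proceeds in 2-byte pieces; OR-ing in sizeof(unsigned long)
 * caps the result at the word size and keeps max_align(0) well defined.
 */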
static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif
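
/*
 * Illustrative example (added comment, not in the original source):
 * byterev_4(0x11223344) = 0x44332211. These helpers back the byte-reversed
 * load/store instructions (lhbrx, lwbrx, ldbrx and the matching stores)
 * emulated further below.
 */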
static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}
static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
	*dest = x;
	return 0;
}
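
/*
 * Note (added comment, not in the original source): the pieces are
 * accumulated most-significant byte first, which is only correct for
 * big-endian mode; emulate_step() refuses to emulate loads and stores
 * when MSR_LE is set.
 */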
/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}
static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}
static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}
/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}
#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&val[0], ea, nb, regs);
		ptr += sizeof(unsigned long) - nb;
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&val[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&val[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}
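
/*
 * Note (added comment, not in the original source): for an unaligned
 * operand the bytes are first gathered into the on-stack val[] buffer and
 * func is given that aligned copy instead; the "ptr += sizeof(unsigned
 * long) - nb" adjustment skips the unused high-order bytes that
 * read_mem_unaligned() left in the word on 64-bit.
 */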
static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	unsigned long val[sizeof(double) / sizeof(long)];
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &val[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		ptr += sizeof(unsigned long) - nb;
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(val[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(val[1], ea + 4, 4, regs);
	}
	return err;
}
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))
#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%2\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))
static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}
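
/*
 * Illustrative mapping (added comment, not in the original source): CR0 is
 * rebuilt as LT/GT/EQ from the sign of the result plus SO copied from XER,
 * so a zero result yields EQ plus the current SO bit, matching what a
 * dot-form (Rc = 1) instruction would produce.
 */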
static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}
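
/*
 * Note (added comment, not in the original source): unsigned carry-out is
 * detected by the sum wrapping below val1; the "carry_in && val == val1"
 * term catches val2 == ~0UL with carry in, where the sum wraps to exactly
 * val1.
 */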
static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
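
/*
 * Worked example (added comment, not in the original source):
 * MASK32(27, 30) = (0xffffffff >> 27) + ((long)-0x80000000 >> 30) + 1
 * = 0x1f - 2 + 1 = 0x1e, i.e. ones in big-endian bit positions 27..30;
 * when me < mb the sum wraps and produces the split mask. ROTATE()
 * special-cases n == 0 because shifting right by the full word width
 * would be undefined behaviour in C.
 */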
/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned long int ea;
	unsigned int cr, mb, me, sh;
	int err;
	unsigned long old_ra, val3;
	long ival;
	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = truncate_if_32bit(regs->msr, imm);
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;
#endif
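
	/*
	 * Note on the sc case above (added comment, not in the original
	 * source): gpr[0] == 0x1ebe is the magic "switch endian" request;
	 * on CPUs with true little-endian support (CPU_FTR_REAL_LE) it is
	 * handled by simply flipping MSR_LE instead of entering the kernel.
	 */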
	case 18:	/* b */
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 16:	/* bclr */
		case 528:	/* bcctr */
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;
		case 18:	/* rfid, scary */
			return -1;

		case 150:	/* isync */
			isync();
			goto instr_done;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			eieio();
			goto instr_done;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;
	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	switch (opcode) {
	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;
	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;
	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;
#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
		break;
#endif
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				break;
			regs->gpr[rd] = regs->msr & MSR_MASK;
			goto instr_done;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				break;
			imm = regs->gpr[rd];
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsr that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV and MSR_ME */
			if (regs->msr & MSR_PR)
				break;
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
			imm = (regs->msr & MSR_MASK & ~imm)
				| (regs->gpr[rd] & imm);
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsrd that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			goto instr_done;
#endif
		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;

		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;

		case 339:	/* mfspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case 0x100:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case 0x120:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			}
			break;

		case 467:	/* mtspr */
			spr = (instr >> 11) & 0x3ff;
			switch (spr) {
			case 0x20:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case 0x100:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case 0x120:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			}
			break;
/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;
/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;
/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif
/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */
/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbst");
			if (err)
				return 0;
			goto instr_done;

		case 86:	/* dcbf */
			ea = xform_ea(instr, regs, 0);
			if (!address_ok(regs, ea, 8))
				return 0;
			err = 0;
			__cacheop_user_asmx(ea, err, "dcbf");
			if (err)
				return 0;
			goto instr_done;

		case 246:	/* dcbtst */
			ea = xform_ea(instr, regs, 0);
			prefetchw((void *) ea);
			goto instr_done;

		case 278:	/* dcbt */
			ea = xform_ea(instr, regs, 0);
			prefetch((void *) ea);
			goto instr_done;
		}
		break;
	}

	/*
	 * Following cases are for loads and stores, so bail out
	 * if we're in little-endian mode.
	 */
	if (regs->msr & MSR_LE)
		return 0;

	/*
	 * Save register RA in case it's an update form load or store
	 * and the access faults.
	 */
	old_ra = regs->gpr[ra];

	switch (opcode) {
	case 31:
		u = instr & 0x40;
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "lwarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;
		case 150:	/* stwcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 3)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 4))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stwcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;
#ifdef __powerpc64__
		case 84:	/* ldarx */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__get_user_asmx(val, ea, err, "ldarx");
			if (!err)
				regs->gpr[rd] = val;
			goto ldst_done;

		case 214:	/* stdcx. */
			ea = xform_ea(instr, regs, 0);
			if (ea & 7)
				break;		/* can't handle misaligned */
			err = -EFAULT;
			if (!address_ok(regs, ea, 8))
				goto ldst_done;
			err = 0;
			__put_user_asmx(regs->gpr[rd], ea, err, "stdcx.", cr);
			if (!err)
				regs->ccr = (regs->ccr & 0x0fffffff) |
					(cr & 0xe0000000) |
					((regs->xer >> 3) & 0x10000000);
			goto ldst_done;

		case 21:	/* ldx */
		case 53:	/* ldux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       8, regs);
			goto ldst_done;
#endif
		case 23:	/* lwzx */
		case 55:	/* lwzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			goto ldst_done;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       1, regs);
			goto ldst_done;
#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				break;
			ea = xform_ea(instr, regs, 0);
			err = do_vec_load(rd, do_lvx, ea, regs);
			goto ldst_done;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				break;
			ea = xform_ea(instr, regs, 0);
			err = do_vec_store(rd, do_stvx, ea, regs);
			goto ldst_done;
#endif /* CONFIG_ALTIVEC */
#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 8, regs);
			goto ldst_done;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 4, regs);
			goto ldst_done;

		case 215:	/* stbx */
		case 247:	/* stbux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 1, regs);
			goto ldst_done;
		case 279:	/* lhzx */
		case 311:	/* lhzux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			goto ldst_done;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			err = read_mem(&regs->gpr[rd], xform_ea(instr, regs, u),
				       2, regs);
			if (!err)
				regs->gpr[rd] = (signed short) regs->gpr[rd];
			goto ldst_done;

		case 407:	/* sthx */
		case 439:	/* sthux */
			val = regs->gpr[rd];
			err = write_mem(val, xform_ea(instr, regs, u), 2, regs);
			goto ldst_done;
#ifdef __powerpc64__
		case 532:	/* ldbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 8, regs);
			if (!err)
				regs->gpr[rd] = byterev_8(val);
			goto ldst_done;
#endif

		case 534:	/* lwbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 4, regs);
			if (!err)
				regs->gpr[rd] = byterev_4(val);
			goto ldst_done;
#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfs, ea, 4, regs);
			goto ldst_done;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_load(rd, do_lfd, ea, 8, regs);
			goto ldst_done;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfs, ea, 4, regs);
			goto ldst_done;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				break;
			ea = xform_ea(instr, regs, u);
			err = do_fp_store(rd, do_stfd, ea, 8, regs);
			goto ldst_done;
#endif
#ifdef __powerpc64__
		case 660:	/* stdbrx */
			val = byterev_8(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 8, regs);
			goto ldst_done;
#endif
		case 662:	/* stwbrx */
			val = byterev_4(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 4, regs);
			goto ldst_done;

		case 790:	/* lhbrx */
			err = read_mem(&val, xform_ea(instr, regs, 0), 2, regs);
			if (!err)
				regs->gpr[rd] = byterev_2(val);
			goto ldst_done;

		case 918:	/* sthbrx */
			val = byterev_2(regs->gpr[rd]);
			err = write_mem(val, xform_ea(instr, regs, 0), 2, regs);
			goto ldst_done;
#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_load(rd, do_lxvd2x, ea, regs);
			goto ldst_done;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				break;
			rd |= (instr & 1) << 5;
			ea = xform_ea(instr, regs, u);
			err = do_vsx_store(rd, do_stxvd2x, ea, regs);
			goto ldst_done;
#endif /* CONFIG_VSX */
		}
		break;
	case 32:	/* lwz */
	case 33:	/* lwzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 1, regs);
		goto ldst_done;
	case 36:	/* stw */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 4, regs);
		goto ldst_done;

	case 37:	/* stwu */
		val = regs->gpr[rd];
		val3 = dform_ea(instr, regs);
		/*
		 * For PPC32, we always use stwu with r1 to change the stack
		 * pointer, so this emulated store may corrupt the exception
		 * frame. We provide an exception frame trampoline, pushed
		 * below the kprobed function's stack, so here we only update
		 * gpr[1] and don't emulate the actual store operation. The
		 * real store is done safely in the exception return code,
		 * which checks this flag.
		 */
		if ((ra == 1) && !(regs->msr & MSR_PR)
			&& (val3 >= (regs->gpr[1] - STACK_INT_FRAME_SIZE))) {
#ifdef CONFIG_PPC32
			/*
			 * Check whether this store would overflow
			 * the kernel stack.
			 */
			if (val3 - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
				printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
				err = -EINVAL;
				break;
			}
#endif /* CONFIG_PPC32 */
			/*
			 * Check if we already set since that means we'll
			 * lose the previous value.
			 */
			WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
			set_thread_flag(TIF_EMULATE_STACK_STORE);
			err = 0;
		} else
			err = write_mem(val, val3, 4, regs);
		goto ldst_done;
	case 38:	/* stb */
	case 39:	/* stbu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 1, regs);
		goto ldst_done;
	case 40:	/* lhz */
	case 41:	/* lhzu */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		goto ldst_done;

	case 42:	/* lha */
	case 43:	/* lhau */
		err = read_mem(&regs->gpr[rd], dform_ea(instr, regs), 2, regs);
		if (!err)
			regs->gpr[rd] = (signed short) regs->gpr[rd];
		goto ldst_done;

	case 44:	/* sth */
	case 45:	/* sthu */
		val = regs->gpr[rd];
		err = write_mem(val, dform_ea(instr, regs), 2, regs);
		goto ldst_done;
	case 46:	/* lmw */
		ra = (instr >> 16) & 0x1f;
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		ea = dform_ea(instr, regs);
		do {
			err = read_mem(&regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;

	case 47:	/* stmw */
		ea = dform_ea(instr, regs);
		do {
			err = write_mem(regs->gpr[rd], ea, 4, regs);
			if (err)
				return 0;
			ea += 4;
		} while (++rd < 32);
		goto instr_done;
#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfs, ea, 4, regs);
		goto ldst_done;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_load(rd, do_lfd, ea, 8, regs);
		goto ldst_done;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfs, ea, 4, regs);
		goto ldst_done;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			break;
		ea = dform_ea(instr, regs);
		err = do_fp_store(rd, do_stfd, ea, 8, regs);
		goto ldst_done;
#endif
#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		switch (instr & 3) {
		case 0:		/* ld */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 1:		/* ldu */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       8, regs);
			goto ldst_done;
		case 2:		/* lwa */
			err = read_mem(&regs->gpr[rd], dsform_ea(instr, regs),
				       4, regs);
			if (!err)
				regs->gpr[rd] = (signed int) regs->gpr[rd];
			goto ldst_done;
		}
		break;

	case 62:	/* std[u] */
		val = regs->gpr[rd];
		switch (instr & 3) {
		case 0:		/* std */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		case 1:		/* stdu */
			err = write_mem(val, dsform_ea(instr, regs), 8, regs);
			goto ldst_done;
		}
		break;
#endif /* __powerpc64__ */
	}

	return 0;

 ldst_done:
	if (err) {
		regs->gpr[ra] = old_ra;
		return 0;	/* invoke DSI if -EFAULT? */
	}
 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);