/*
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>
extern char system_call_common[];
#ifdef __powerpc64__
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
	return val;
}
/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
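/*
 * Worked example: "bc 12,0,target" has BO = 0b01100, so the 0x04 bit is set
 * (don't decrement CTR), the 0x10 bit is clear (test the CR bit selected by
 * BI, here bit 0, i.e. "lt"), and the 0x08 bit says branch when that CR bit
 * is 1, which is the usual encoding of "blt target".
 */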
static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}
/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64 */
/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr,
					struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
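/*
 * For example, "lwz r3,8(r4)" (D-form) has EA = GPR[4] + 8, while
 * "lwzx r3,r4,r5" (X-form) has EA = GPR[4] + GPR[5]; in all of these
 * forms RA = 0 means a base of zero rather than GPR[0].
 */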
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
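/*
 * For example, max_align(6) is 2, max_align(12) is 4 and max_align(0) is
 * sizeof(unsigned long), so the unaligned helpers below can split an access
 * into the largest naturally-aligned pieces.
 */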
static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif
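/*
 * For example, byterev_4(0x12345678) is 0x78563412; these helpers supply the
 * opposite-endian view of a value for the byte-reversed load/store
 * instructions and for cross-endian unaligned accesses.
 */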
static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}
static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;
#ifdef __LITTLE_ENDIAN__
	int len = nb; /* save a copy of the length for byte reversal */
#endif

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
#ifdef __LITTLE_ENDIAN__
	switch (len) {
	case 2:
		*dest = byterev_2(x);
		break;
	case 4:
		*dest = byterev_4(x);
		break;
	case 8:
		*dest = byterev_8(x);
		break;
	}
#endif
#ifdef __BIG_ENDIAN__
	*dest = x;
#endif
	return 0;
}
/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}
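/*
 * For example, a 4-byte read from an address ending in ...01 (big-endian
 * case) is assembled from aligned pieces of 1, 2 and 1 bytes, each fetched
 * with read_mem_aligned() and shifted into place.
 */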
static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}
static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

#ifdef __LITTLE_ENDIAN__
	switch (nb) {
	case 2:
		val = byterev_2(val);
		break;
	case 4:
		val = byterev_4(val);
		break;
	case 8:
		val = byterev_8(val);
		break;
	}
#endif
	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}
/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}
#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul;
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}
static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
	}
	return err;
}
#endif
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */
#define __put_user_asmx(x, addr, err, op, cr) \
	__asm__ __volatile__(				\
		"1: " op " %2,0,%3\n"			\
		".section .fixup,\"ax\"\n"		\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op) \
	__asm__ __volatile__(				\
		"1: "op" %1,0,%2\n"			\
		".section .fixup,\"ax\"\n"		\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op) \
	__asm__ __volatile__(				\
		".section .fixup,\"ax\"\n"		\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
		: "r" (addr), "i" (-EFAULT), "0" (err))
static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}
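/*
 * For example, a negative result sets CR0 to 0b1000 (LT), a positive one to
 * 0b0100 (GT) and zero to 0b0010 (EQ), in each case with the low bit copied
 * from XER[SO].
 */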
static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}
static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
static int __kprobes trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}
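/*
 * The returned bits follow the TO field encoding: 0x10 = signed less than,
 * 0x08 = signed greater than, 0x04 = equal, 0x02/0x01 = unsigned less/greater
 * than.  For example, "tweq ra,rb" (TO = 4) traps only when the operands are
 * equal.
 */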
/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
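/*
 * For example, on 64-bit MASK64(60, 63) is 0xf: bits are numbered from the
 * most-significant end, as in the ISA, and ROTATE(x, n) rotates x left by n
 * bits within an unsigned long.
 */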
/*
 * Decode an instruction, and execute it if that can be done just by
 * modifying *regs (i.e. integer arithmetic and logical instructions,
 * branches, and barrier instructions).
 * Returns 1 if the instruction has been executed, or 0 if not.
 * Sets *op to indicate what the instruction does.
 */
int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
			    unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = truncate_if_32bit(regs->msr, imm);

		if ((instr & 0xfe2) == 2)
			op->type = SYSCALL;

		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		switch ((instr >> 1) & 0x3ff) {
			rd = 7 - ((instr >> 23) & 0x7);
			ra = 7 - ((instr >> 18) & 0x7);
			val = (regs->ccr >> ra) & 0xf;
			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);

		case 528:	/* bcctr */
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;

		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)
				goto priv;

		case 150:	/* isync */

		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));

		switch ((instr >> 1) & 0x3ff) {
			switch ((instr >> 21) & 3) {
				asm volatile("lwsync" : : : "memory");
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");

		case 854:	/* eieio */
	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

		if (rd & trap_compare(regs->gpr[ra], (short) instr))
			goto trap;

		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
			goto trap;

		regs->gpr[rd] = regs->gpr[ra] * (short) instr;

		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);

		imm = (unsigned short) instr;
		val = (unsigned int) val;
		do_cmp_unsigned(regs, val, imm, rd >> 2);

		do_cmp_signed(regs, val, imm, rd >> 2);

		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);

	case 13:	/* addic. */
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);

		imm += regs->gpr[ra];

		imm = ((short) instr) << 16;
		imm += regs->gpr[ra];
	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);

		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);

		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;

		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);

		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;

		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);

		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
				regs->gpr[ra] = val & MASK64_L(mb);
				regs->gpr[ra] = val & MASK64_R(mb);
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(ROTATE(val, sh) & imm);
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
				regs->gpr[ra] = val & MASK64_L(mb);
				regs->gpr[ra] = val & MASK64_R(mb);
		break;		/* illegal instruction */
		switch ((instr >> 1) & 0x3ff) {
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))

			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;

			if (regs->msr & MSR_PR)
				goto priv;

		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);

		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
				goto priv;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;

			if ((instr >> 20) & 1) {
				for (sh = 0; sh < 8; ++sh) {
					if (instr & (0x80000 >> sh)) {
						regs->gpr[rd] = regs->ccr & imm;

			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;

		case 144:	/* mtcrf */
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(regs->gpr[rd] & imm);
		case 339:	/* mfspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			case SPRN_XER:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
			case SPRN_LR:	/* mflr */
				regs->gpr[rd] = regs->link;
			case SPRN_CTR:	/* mfctr */
				regs->gpr[rd] = regs->ctr;

		case 467:	/* mtspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			case SPRN_XER:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
			case SPRN_LR:	/* mtlr */
				regs->link = regs->gpr[rd];
			case SPRN_CTR:	/* mtctr */
				regs->ctr = regs->gpr[rd];

			op->val = regs->gpr[rd];
/*
 * Compare instructions
 */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
			do_cmp_signed(regs, val, val2, rd >> 2);

			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			do_cmp_unsigned(regs, val, val2, rd >> 2);
/*
 * Arithmetic instructions
 */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));

			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));

			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));

		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));

			regs->gpr[rd] = -regs->gpr[ra];

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];

		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];

			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];

		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];

		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));

			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];

			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];

			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);

			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);

			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];

			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];

			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
/*
 * Shift instructions
 */
			sh = regs->gpr[rb] & 0x3f;
			regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;

			sh = regs->gpr[rb] & 0x3f;
			regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;

		case 824:	/* srawi */
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
#ifdef __powerpc64__
			sh = regs->gpr[rb] & 0x7f;
			regs->gpr[ra] = regs->gpr[rd] << sh;

			sh = regs->gpr[rb] & 0x7f;
			regs->gpr[ra] = regs->gpr[rd] >> sh;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
#endif /* __powerpc64__ */
/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(instr, regs);

			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(instr, regs);

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(instr, regs);

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(instr, regs);
	op->update_reg = ra;
	op->val = regs->gpr[rd];
	u = (instr >> 20) & UPDATE;

		op->ea = xform_ea(instr, regs);
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);

			op->type = MKOP(LOAD, u, 8);

		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);

		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);

#ifdef CONFIG_ALTIVEC
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(LOAD_VMX, 0, 16);

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(STORE_VMX, 0, 16);
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);

		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = truncate_if_32bit(regs->msr,
						   regs->gpr[ra]);

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 4);

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 8);

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 4);

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 8);

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);

		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);

			if (rb == 0)
				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = truncate_if_32bit(regs->msr,
						   regs->gpr[ra]);

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);

		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);

		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, u, 16);

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, u, 16);
#endif /* CONFIG_VSX */
		op->type = MKOP(LOAD, u, 4);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(LOAD, u, 1);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(STORE, u, 4);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(STORE, u, 1);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(LOAD, u, 2);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(LOAD, SIGNEXT | u, 2);
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(STORE, u, 2);
		op->ea = dform_ea(instr, regs);

		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);

		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);

#ifdef CONFIG_PPC_FPU
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 4);
		op->ea = dform_ea(instr, regs);

		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 8);
		op->ea = dform_ea(instr, regs);

	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 4);
		op->ea = dform_ea(instr, regs);

	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 8);
		op->ea = dform_ea(instr, regs);
#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
			op->type = MKOP(LOAD, 0, 8);
			op->type = MKOP(LOAD, UPDATE, 8);
			op->type = MKOP(LOAD, SIGNEXT, 4);

	case 62:	/* std[u] */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
			op->type = MKOP(STORE, 0, 8);
			op->type = MKOP(STORE, UPDATE, 8);
#endif /* __powerpc64__ */
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);

	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGPRIV;

	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGTRAP;

#ifdef CONFIG_PPC_FPU
	op->type = INTERRUPT | 0x800;

#ifdef CONFIG_ALTIVEC
	op->type = INTERRUPT | 0xf20;

	op->type = INTERRUPT | 0xf40;
}
EXPORT_SYMBOL_GPL(analyse_instr);
/*
 * On PPC32 we always use stwu with r1 to change the stack pointer, so this
 * emulated store may corrupt the exception frame.  We therefore have to
 * provide the exception frame trampoline, which is pushed below the kprobed
 * function's stack, and only update gpr[1] here rather than emulating the
 * real store; the store is done safely in the exception return code, which
 * checks this flag.
 */
static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	/*
	 * Check if we will touch kernel stack overflow
	 */
	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
		return -EINVAL;
	}
#endif /* CONFIG_PPC32 */
	/*
	 * Check if we already set since that means we'll
	 * lose the previous value.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}
static __kprobes void do_signext(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = (signed short) *valp;
		break;
	case 4:
		*valp = (signed int) *valp;
		break;
	}
}
static __kprobes void do_byterev(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = byterev_2(*valp);
		break;
	case 4:
		*valp = byterev_4(*valp);
		break;
#ifdef __powerpc64__
	case 8:
		*valp = byterev_8(*valp);
		break;
#endif
	}
}
/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;

	r = analyse_instr(&op, regs, instr);

	size = GETSIZE(op.type);
	switch (op.type & INSTR_TYPE_MASK) {
		if (!address_ok(regs, op.ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
			__cacheop_user_asmx(op.ea, err, "dcbst");
			__cacheop_user_asmx(op.ea, err, "dcbf");
			prefetchw((void *) op.ea);
			prefetch((void *) op.ea);
			__cacheop_user_asmx(op.ea, err, "icbi");
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, op.ea, size))
			return 0;
			__get_user_asmx(val, op.ea, err, "lwarx");
#ifdef __powerpc64__
			__get_user_asmx(val, op.ea, err, "ldarx");
		regs->gpr[op.reg] = val;

		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, op.ea, size))
			return 0;
			__put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
#ifdef __powerpc64__
			__put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
		regs->ccr = (regs->ccr & 0x0fffffff) |
			(cr & 0xe0000000) |
			((regs->xer >> 3) & 0x10000000);
		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
		if (op.type & SIGNEXT)
			do_signext(&regs->gpr[op.reg], size);
		if (op.type & BYTEREV)
			do_byterev(&regs->gpr[op.reg], size);

#ifdef CONFIG_PPC_FPU
			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
			err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
#ifdef CONFIG_ALTIVEC
		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);

		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);

		if (regs->msr & MSR_LE)
			return 0;
		for (i = 0; i < size; i += 4) {
			err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
			if (nb < 4)	/* left-justify last bytes */
				regs->gpr[rd] <<= 32 - 8 * nb;
		if ((op.type & UPDATE) && size == sizeof(long) &&
		    op.reg == 1 && op.update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
		    op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(op.ea, regs);
		err = write_mem(op.val, op.ea, size, regs);

#ifdef CONFIG_PPC_FPU
			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
			err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
#ifdef CONFIG_ALTIVEC
		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);

		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);

		if (regs->msr & MSR_LE)
			return 0;
		for (i = 0; i < size; i += 4) {
			val = regs->gpr[rd];
				val >>= 32 - 8 * nb;
			err = write_mem(val, op.ea, nb, regs);
		regs->gpr[op.reg] = regs->msr & MSR_MASK;

		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);

	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;

	if (op.type & UPDATE)
		regs->gpr[op.update_reg] = op.ea;

	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
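/*
 * Typical use (illustrative sketch only, not part of this file): a caller
 * such as the kprobes code reads the probed instruction and lets
 * emulate_step() carry it out on the saved register state, falling back to
 * stepping the instruction out of line when it returns 0:
 *
 *	unsigned int instr = *(unsigned int *)probed_addr;
 *
 *	if (emulate_step(regs, instr) > 0)
 *		return;		(regs->nip has already been advanced)
 *	... otherwise arrange to execute the instruction out of line ...
 */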