/*
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
extern char system_call_common[];
#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif
/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
						       unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}
/*
 * Determine whether a conditional branch instruction would branch.
 */
static nokprobe_inline int branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
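/*
 * Illustrative example (not from the original source): for "bne cr1,
 * target", BO = 0b00100 and BI = 6, so CTR is left alone and the branch
 * is taken when bit 6 of the CR (cr1's EQ bit) is 0.
 */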
static nokprobe_inline long address_ok(struct pt_regs *regs, unsigned long ea,
				       int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}
/*
 * Calculate effective address for a D-form instruction
 */
static nokprobe_inline unsigned long dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static nokprobe_inline unsigned long dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64 */
/*
 * Calculate effective address for an X-form instruction
 */
static nokprobe_inline unsigned long xform_ea(unsigned int instr,
					      struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static nokprobe_inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
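/*
 * Illustrative example (not from the original source): on 64-bit,
 * max_align(0x1006) = 2, since the value is 2-byte but not 4-byte
 * aligned; fully aligned addresses return sizeof(unsigned long).
 */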
static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static nokprobe_inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static nokprobe_inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif
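/*
 * E.g. byterev_4(0x11223344) = 0x44332211.  These helpers back both the
 * byte-reversed (lhbrx/lwbrx/ldbrx-style) accesses and the little-endian
 * unaligned paths below.
 */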
static nokprobe_inline int read_mem_aligned(unsigned long *dest,
					    unsigned long ea, int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}
static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
					      unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;
#ifdef __LITTLE_ENDIAN__
	int len = nb; /* save a copy of the length for byte reversal */
#endif

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
#ifdef __LITTLE_ENDIAN__
	switch (len) {
	case 2:
		*dest = byterev_2(x);
		break;
	case 4:
		*dest = byterev_4(x);
		break;
#ifdef __powerpc64__
	case 8:
		*dest = byterev_8(x);
		break;
#endif
	}
#endif
#ifdef __BIG_ENDIAN__
	*dest = x;
#endif
	return 0;
}
/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int read_mem(unsigned long *dest, unsigned long ea, int nb,
		    struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}
NOKPROBE_SYMBOL(read_mem);
static nokprobe_inline int write_mem_aligned(unsigned long val,
					     unsigned long ea, int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}
static nokprobe_inline int write_mem_unaligned(unsigned long val,
					       unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

#ifdef __LITTLE_ENDIAN__
	switch (nb) {
	case 2:
		val = byterev_2(val);
		break;
	case 4:
		val = byterev_4(val);
		break;
#ifdef __powerpc64__
	case 8:
		val = byterev_8(val);
		break;
#endif
	}
#endif
	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}
/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int write_mem(unsigned long val, unsigned long ea, int nb,
		     struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}
NOKPROBE_SYMBOL(write_mem);
#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int do_fp_load(int rn, int (*func)(int, unsigned long),
		      unsigned long ea, int nb,
		      struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul;
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}
NOKPROBE_SYMBOL(do_fp_load);
static int do_fp_store(int rn, int (*func)(int, unsigned long),
		       unsigned long ea, int nb,
		       struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
	}
	return err;
}
NOKPROBE_SYMBOL(do_fp_store);
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static nokprobe_inline int do_vec_load(int rn, int (*func)(int, unsigned long),
				       unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static nokprobe_inline int do_vec_store(int rn, int (*func)(int, unsigned long),
					unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
static nokprobe_inline int do_vsx_load(int rn, int (*func)(int, unsigned long),
				       unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static nokprobe_inline int do_vsx_store(int rn, int (*func)(int, unsigned long),
					unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))
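/*
 * The macros above follow the usual uaccess fixup pattern: label 1 is
 * the access that may fault, the .fixup stub at label 3 loads -EFAULT
 * into err and branches back to label 2, and the exception-table entry
 * wires 1b to 3b.
 */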
static nokprobe_inline void set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}
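/*
 * CR0 thus ends up as LT/GT/EQ/SO in the top nibble of regs->ccr, with
 * SO copied down from XER[SO] (0x80000000 >> 3 == 0x10000000).
 */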
static nokprobe_inline void add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}
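/*
 * Subtraction is implemented by the callers as ~a + b + 1 (e.g. subfic
 * below), so a carry out of this addition corresponds to "no borrow",
 * exactly as the hardware defines CA.
 */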
static nokprobe_inline void do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
static nokprobe_inline void do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
static nokprobe_inline int trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}
/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)		((0xffffffffUL >> (mb)) + \
				 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)		(~0UL >> (mb))
#define MASK64_R(me)		((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)		(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)		(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
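/*
 * Worked example: MASK32(0, 15) = 0xffff0000, i.e. ones from IBM bit mb
 * through bit me.  When me < mb the addition wraps and yields the
 * inverted (wrap-around) mask that rlwinm defines for that case.
 */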
/*
 * Decode an instruction, and execute it if that can be done just by
 * modifying *regs (i.e. integer arithmetic and logical instructions,
 * branches, and barrier instructions).
 * Returns 1 if the instruction has been executed, or 0 if not.
 * Sets *op to indicate what the instruction does.
 */
int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
		  unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	long ival;
	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = truncate_if_32bit(regs->msr, imm);
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		if ((instr & 0xfe2) == 2)
			op->type = SYSCALL;
		else
			op->type = UNKNOWN;
		return 0;
#endif
	case 18:	/* b */
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			rd = 7 - ((instr >> 23) & 0x7);
			ra = 7 - ((instr >> 18) & 0x7);
			rd *= 4;
			ra *= 4;
			val = (regs->ccr >> ra) & 0xf;
			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			goto instr_done;
		case 16:	/* bclr */
		case 528:	/* bcctr */
			imm = (instr & 0x400) ? regs->ctr : regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;
		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = RFI;
			return 0;
		case 150:	/* isync */
			op->type = BARRIER;
			isync();
			goto instr_done;
		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER;
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			op->type = BARRIER;
			eieio();
			goto instr_done;
		}
		break;
	}
	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
#ifdef __powerpc64__
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;
	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;
	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;
	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;
	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;
	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;
#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
		break;	/* illegal instruction */
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			goto instr_done;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			goto instr_done;
#endif
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;

		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (instr & 0x10000) ? 0x8002 : 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif
		case 19:	/* mfcr */
			if ((instr >> 20) & 1) {
				imm = 0xf0000000UL;
				for (sh = 0; sh < 8; ++sh) {
					if (instr & (0x80000 >> sh)) {
						regs->gpr[rd] = regs->ccr & imm;
						break;
					}
					imm >>= 4;
				}
				goto instr_done;
			}
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;
		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;
		case 339:	/* mfspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case SPRN_LR:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case SPRN_CTR:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			default:
				op->type = MFSPR;
				op->reg = rd;
				op->spr = spr;
				return 0;
			}
			break;
		case 467:	/* mtspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case SPRN_LR:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case SPRN_CTR:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			default:
				op->type = MTSPR;
				op->val = regs->gpr[rd];
				op->spr = spr;
				return 0;
			}
			break;
/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;
/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;
/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif
/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */
/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(instr, regs);
			return 0;
		}
		break;
	}
	/*
	 * Loads and stores.
	 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
	u = (instr >> 20) & UPDATE;
	switch (opcode) {
	case 31:
		op->ea = xform_ea(instr, regs);
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;
#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;
#endif
		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;
#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(LOAD_VMX, 0, 16);
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(STORE_VMX, 0, 16);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif
		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;
#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif
		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;
#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;
#endif
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;
#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 4);
			break;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 8);
			break;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 4);
			break;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 8);
			break;
#endif
#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);
			break;
#endif
		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
			break;

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);
			break;

		case 725:	/* stswi */
			if (rb == 0)
				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);
			break;

		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);
			break;
#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, u, 16);
			break;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, u, 16);
			break;
#endif /* CONFIG_VSX */
		}
		break;
	case 32:	/* lwz */
	case 33:	/* lwzu */
		op->type = MKOP(LOAD, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		op->type = MKOP(LOAD, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 36:	/* stw */
	case 37:	/* stwu */
		op->type = MKOP(STORE, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 38:	/* stb */
	case 39:	/* stbu */
		op->type = MKOP(STORE, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		op->type = MKOP(LOAD, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 42:	/* lha */
	case 43:	/* lhau */
		op->type = MKOP(LOAD, SIGNEXT | u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 44:	/* sth */
	case 45:	/* sthu */
		op->type = MKOP(STORE, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 46:	/* lmw */
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

	case 47:	/* stmw */
		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;
#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;
#endif
#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* ld */
			op->type = MKOP(LOAD, 0, 8);
			break;
		case 1:		/* ldu */
			op->type = MKOP(LOAD, UPDATE, 8);
			break;
		case 2:		/* lwa */
			op->type = MKOP(LOAD, SIGNEXT, 4);
			break;
		}
		break;

	case 62:	/* std[u] */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* std */
			op->type = MKOP(STORE, 0, 8);
			break;
		case 1:		/* stdu */
			op->type = MKOP(STORE, UPDATE, 8);
			break;
		}
		break;
#endif /* __powerpc64__ */
	}
	return 0;
 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;

 priv:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGPRIV;
	return 0;

 trap:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGTRAP;
	return 0;

#ifdef CONFIG_PPC_FPU
 fpunavail:
	op->type = INTERRUPT | 0x800;
	return 0;
#endif

#ifdef CONFIG_ALTIVEC
 vecunavail:
	op->type = INTERRUPT | 0xf20;
	return 0;
#endif

#ifdef CONFIG_VSX
 vsxunavail:
	op->type = INTERRUPT | 0xf40;
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(analyse_instr);
NOKPROBE_SYMBOL(analyse_instr);
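/*
 * Example (a minimal sketch, not part of the original file): a caller
 * that only needs the decode side, e.g. to find the effective address
 * an instruction would touch, might do:
 *
 *	struct instruction_op op;
 *
 *	if (analyse_instr(&op, regs, instr) == 0 &&
 *	    (op.type & INSTR_TYPE_MASK) == LOAD)
 *		pr_debug("load of %d bytes from 0x%lx\n",
 *			 GETSIZE(op.type), op.ea);
 */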
/*
 * For PPC32 we always use stwu with r1 to change the stack pointer.
 * So this emulated store may corrupt the exception frame, now we
 * have to provide the exception frame trampoline, which is pushed
 * below the kprobed function stack. So we only update gpr[1] but
 * don't emulate the real store operation. We will do real store
 * operation safely in exception return code by checking this flag.
 */
static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	/*
	 * Check if we will touch kernel stack overflow
	 */
	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
		return -EINVAL;
	}
#endif /* CONFIG_PPC32 */
	/*
	 * Check if we already set since that means we'll
	 * lose the previous value.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}
static nokprobe_inline void do_signext(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = (signed short) *valp;
		break;
	case 4:
		*valp = (signed int) *valp;
		break;
	}
}

static nokprobe_inline void do_byterev(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = byterev_2(*valp);
		break;
	case 4:
		*valp = byterev_4(*valp);
		break;
#ifdef __powerpc64__
	case 8:
		*valp = byterev_8(*valp);
		break;
#endif
	}
}
/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int emulate_step(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;
	int r, err, size;
	unsigned long val;
	unsigned int cr;
	int i, rd, nb;

	r = analyse_instr(&op, regs, instr);
	if (r != 0)
		return r;

	err = 0;
	size = GETSIZE(op.type);
	switch (op.type & INSTR_TYPE_MASK) {
	case CACHEOP:
		if (!address_ok(regs, op.ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(op.ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(op.ea, err, "dcbf");
			break;
		case DCBTST:
			prefetchw((void *) op.ea);
			break;
		case DCBT:
			prefetch((void *) op.ea);
			break;
		case ICBI:
			__cacheop_user_asmx(op.ea, err, "icbi");
			break;
		}
		if (err)
			return 0;
		goto instr_done;
	case LARX:
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, op.ea, size))
			return 0;
		err = 0;
		switch (size) {
		case 4:
			__get_user_asmx(val, op.ea, err, "lwarx");
			break;
#ifdef __powerpc64__
		case 8:
			__get_user_asmx(val, op.ea, err, "ldarx");
			break;
#endif
		default:
			return 0;
		}
		if (!err)
			regs->gpr[op.reg] = val;
		goto ldst_done;
	case STCX:
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, op.ea, size))
			return 0;
		err = 0;
		switch (size) {
		case 4:
			__put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
			break;
#ifdef __powerpc64__
		case 8:
			__put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
			break;
#endif
		default:
			return 0;
		}
		if (!err)
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
		goto ldst_done;
	case LOAD:
		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
		if (!err) {
			if (op.type & SIGNEXT)
				do_signext(&regs->gpr[op.reg], size);
			if (op.type & BYTEREV)
				do_byterev(&regs->gpr[op.reg], size);
		}
		goto ldst_done;
#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		if (size == 4)
			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
		else
			err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX:
		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case LOAD_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
			if (err)
				return 0;
			if (nb < 4)	/* left-justify last bytes */
				regs->gpr[rd] <<= 32 - 8 * nb;
			op.ea += 4;
			++rd;
		}
		goto instr_done;
	case STORE:
		if ((op.type & UPDATE) && size == sizeof(long) &&
		    op.reg == 1 && op.update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
		    op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(op.ea, regs);
			goto ldst_done;
		}
		err = write_mem(op.val, op.ea, size, regs);
		goto ldst_done;
#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		if (size == 4)
			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
		else
			err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX:
		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case STORE_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			val = regs->gpr[rd];
			nb = size - i;
			if (nb > 4)
				nb = 4;
			else
				val >>= 32 - 8 * nb;
			err = write_mem(val, op.ea, nb, regs);
			if (err)
				return 0;
			op.ea += 4;
			++rd;
		}
		goto instr_done;
	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);
		goto instr_done;
#ifdef CONFIG_PPC64
	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;

	case RFI:
		return -1;
#endif
	}
	return 0;
 ldst_done:
	if (err)
		return 0;
	if (op.type & UPDATE)
		regs->gpr[op.update_reg] = op.ea;

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;
}
NOKPROBE_SYMBOL(emulate_step);
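/*
 * Example (a minimal sketch, not part of this file): a typical caller,
 * e.g. a single-step or kprobe handler with the probed context in regs
 * and the opcode word in instr, might use this as:
 *
 *	switch (emulate_step(regs, instr)) {
 *	case 1:
 *		return;		(emulated; regs->nip already advanced)
 *	case 0:
 *		...		(fall back to hardware single-stepping)
 *	default:
 *		...		(refuse to step this instruction)
 *	}
 */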