1 /*
2 * Single-step support.
3 *
4 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11 #include <linux/kernel.h>
12 #include <linux/kprobes.h>
13 #include <linux/ptrace.h>
14 #include <linux/prefetch.h>
15 #include <asm/sstep.h>
16 #include <asm/processor.h>
17 #include <asm/uaccess.h>
18 #include <asm/cputable.h>
20 extern char system_call_common[];
22 #ifdef CONFIG_PPC64
23 /* Bits in SRR1 that are copied from MSR */
24 #define MSR_MASK 0xffffffff87c0ffffUL
25 #else
26 #define MSR_MASK 0x87c0ffff
27 #endif
29 /* Bits in XER */
30 #define XER_SO 0x80000000U
31 #define XER_OV 0x40000000U
32 #define XER_CA 0x20000000U
34 #ifdef CONFIG_PPC_FPU
35 /*
36 * Functions in ldstfp.S
37 */
38 extern int do_lfs(int rn, unsigned long ea);
39 extern int do_lfd(int rn, unsigned long ea);
40 extern int do_stfs(int rn, unsigned long ea);
41 extern int do_stfd(int rn, unsigned long ea);
42 extern int do_lvx(int rn, unsigned long ea);
43 extern int do_stvx(int rn, unsigned long ea);
44 extern int do_lxvd2x(int rn, unsigned long ea);
45 extern int do_stxvd2x(int rn, unsigned long ea);
46 #endif
48 /*
49 * Emulate the truncation of 64 bit values in 32-bit mode.
50 */
51 static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
52 {
53 #ifdef __powerpc64__
54 if ((msr & MSR_64BIT) == 0)
55 val &= 0xffffffffUL;
56 #endif
57 return val;
58 }
60 /*
61 * Determine whether a conditional branch instruction would take the branch.
62 */
63 static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
64 {
65 unsigned int bo = (instr >> 21) & 0x1f;
66 unsigned int bi;
68 if ((bo & 4) == 0) {
69 /* decrement counter */
70 --regs->ctr;
71 if (((bo >> 1) & 1) ^ (regs->ctr == 0))
72 return 0;
73 }
74 if ((bo & 0x10) == 0) {
75 /* check bit from CR */
76 bi = (instr >> 16) & 0x1f;
77 if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
78 return 0;
79 }
80 return 1;
81 }
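
/*
 * A minimal sketch of how the BO decode above behaves for "bdnz"
 * (BO = 16, 0b10000): (bo & 4) == 0, so CTR is decremented; bit 1 of BO
 * is 0, so the branch is taken while the decremented CTR is non-zero; and
 * (bo & 0x10) is set, so no CR bit is examined.  The helper below is a
 * hypothetical example, not part of the kernel API.
 */
static inline int __maybe_unused example_bdnz_taken(struct pt_regs *regs)
{
	/* 0x42000000 encodes "bdnz .": opcode 16, BO = 16, BI = 0 */
	return branch_taken(0x42000000, regs);
}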
84 static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
85 {
86 if (!user_mode(regs))
87 return 1;
88 return __access_ok(ea, nb, USER_DS);
89 }
91 /*
92 * Calculate effective address for a D-form instruction
93 */
94 static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
95 {
96 int ra;
97 unsigned long ea;
99 ra = (instr >> 16) & 0x1f;
100 ea = (signed short) instr; /* sign-extend */
101 if (ra)
102 ea += regs->gpr[ra];
104 return truncate_if_32bit(regs->msr, ea);
105 }
107 #ifdef __powerpc64__
108 /*
109 * Calculate effective address for a DS-form instruction
110 */
111 static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
112 {
113 int ra;
114 unsigned long ea;
116 ra = (instr >> 16) & 0x1f;
117 ea = (signed short) (instr & ~3); /* sign-extend */
118 if (ra)
119 ea += regs->gpr[ra];
121 return truncate_if_32bit(regs->msr, ea);
122 }
123 #endif /* __powerpc64__ */
125 /*
126 * Calculate effective address for an X-form instruction
127 */
128 static unsigned long __kprobes xform_ea(unsigned int instr,
129 struct pt_regs *regs)
130 {
131 int ra, rb;
132 unsigned long ea;
134 ra = (instr >> 16) & 0x1f;
135 rb = (instr >> 11) & 0x1f;
136 ea = regs->gpr[rb];
137 if (ra)
138 ea += regs->gpr[ra];
140 return truncate_if_32bit(regs->msr, ea);
141 }
143 /*
144 * Return the largest power of 2, not greater than sizeof(unsigned long),
145 * such that x is a multiple of it.
146 */
147 static inline unsigned long max_align(unsigned long x)
148 {
149 x |= sizeof(unsigned long);
150 return x & -x; /* isolates rightmost bit */
151 }
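
/*
 * Worked example: on a 64-bit kernel max_align(0x1001) == 1,
 * max_align(0x1006) == 2, max_align(0x1008) == 8 and max_align(0x1010)
 * == 8, since or-ing in sizeof(unsigned long) caps the result at the
 * word size.
 */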
154 static inline unsigned long byterev_2(unsigned long x)
155 {
156 return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
157 }
159 static inline unsigned long byterev_4(unsigned long x)
160 {
161 return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
162 ((x & 0xff00) << 8) | ((x & 0xff) << 24);
163 }
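
/*
 * Worked example: byterev_2(0xaabb) == 0xbbaa and
 * byterev_4(0x11223344) == 0x44332211; these back the byte-reversed
 * load/store instructions (lhbrx and friends) and the little-endian
 * fixup in read_mem_unaligned()/write_mem_unaligned() below.
 */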
165 #ifdef __powerpc64__
166 static inline unsigned long byterev_8(unsigned long x)
167 {
168 return (byterev_4(x) << 32) | byterev_4(x >> 32);
169 }
170 #endif
172 static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
173 int nb)
174 {
175 int err = 0;
176 unsigned long x = 0;
178 switch (nb) {
179 case 1:
180 err = __get_user(x, (unsigned char __user *) ea);
181 break;
182 case 2:
183 err = __get_user(x, (unsigned short __user *) ea);
184 break;
185 case 4:
186 err = __get_user(x, (unsigned int __user *) ea);
187 break;
188 #ifdef __powerpc64__
189 case 8:
190 err = __get_user(x, (unsigned long __user *) ea);
191 break;
192 #endif
193 }
194 if (!err)
195 *dest = x;
196 return err;
197 }
199 static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
200 int nb, struct pt_regs *regs)
201 {
202 int err;
203 unsigned long x, b, c;
204 #ifdef __LITTLE_ENDIAN__
205 int len = nb; /* save a copy of the length for byte reversal */
206 #endif
208 /* unaligned, do this in pieces */
209 x = 0;
210 for (; nb > 0; nb -= c) {
211 #ifdef __LITTLE_ENDIAN__
212 c = 1;
213 #endif
214 #ifdef __BIG_ENDIAN__
215 c = max_align(ea);
216 #endif
217 if (c > nb)
218 c = max_align(nb);
219 err = read_mem_aligned(&b, ea, c);
220 if (err)
221 return err;
222 x = (x << (8 * c)) + b;
223 ea += c;
224 }
225 #ifdef __LITTLE_ENDIAN__
226 switch (len) {
227 case 2:
228 *dest = byterev_2(x);
229 break;
230 case 4:
231 *dest = byterev_4(x);
232 break;
233 #ifdef __powerpc64__
234 case 8:
235 *dest = byterev_8(x);
236 break;
237 #endif
238 }
239 #endif
240 #ifdef __BIG_ENDIAN__
241 *dest = x;
242 #endif
243 return 0;
244 }
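
/*
 * Worked example: on big-endian, a 4-byte read at ea = 0x1001 is split
 * by max_align() into chunks of 1, 2 and 1 bytes (at 0x1001, 0x1002 and
 * 0x1004), accumulated most-significant-first into x.  On little-endian
 * the loop always reads single bytes and the switch above byte-reverses
 * the accumulated value.
 */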
246 /*
247 * Read memory at address ea for nb bytes, return 0 for success
248 * or -EFAULT if an error occurred.
249 */
250 static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
251 struct pt_regs *regs)
252 {
253 if (!address_ok(regs, ea, nb))
254 return -EFAULT;
255 if ((ea & (nb - 1)) == 0)
256 return read_mem_aligned(dest, ea, nb);
257 return read_mem_unaligned(dest, ea, nb, regs);
258 }
260 static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
261 int nb)
262 {
263 int err = 0;
265 switch (nb) {
266 case 1:
267 err = __put_user(val, (unsigned char __user *) ea);
268 break;
269 case 2:
270 err = __put_user(val, (unsigned short __user *) ea);
271 break;
272 case 4:
273 err = __put_user(val, (unsigned int __user *) ea);
274 break;
275 #ifdef __powerpc64__
276 case 8:
277 err = __put_user(val, (unsigned long __user *) ea);
278 break;
279 #endif
280 }
281 return err;
282 }
284 static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
285 int nb, struct pt_regs *regs)
286 {
287 int err;
288 unsigned long c;
290 #ifdef __LITTLE_ENDIAN__
291 switch (nb) {
292 case 2:
293 val = byterev_2(val);
294 break;
295 case 4:
296 val = byterev_4(val);
297 break;
298 #ifdef __powerpc64__
299 case 8:
300 val = byterev_8(val);
301 break;
302 #endif
303 }
304 #endif
305 /* unaligned or little-endian, do this in pieces */
306 for (; nb > 0; nb -= c) {
307 #ifdef __LITTLE_ENDIAN__
308 c = 1;
309 #endif
310 #ifdef __BIG_ENDIAN__
311 c = max_align(ea);
312 #endif
313 if (c > nb)
314 c = max_align(nb);
315 err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
316 if (err)
317 return err;
318 ea += c;
319 }
320 return 0;
321 }
323 /*
324 * Write memory at address ea for nb bytes, return 0 for success
325 * or -EFAULT if an error occurred.
326 */
327 static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
328 struct pt_regs *regs)
329 {
330 if (!address_ok(regs, ea, nb))
331 return -EFAULT;
332 if ((ea & (nb - 1)) == 0)
333 return write_mem_aligned(val, ea, nb);
334 return write_mem_unaligned(val, ea, nb, regs);
335 }
337 #ifdef CONFIG_PPC_FPU
338 /*
339 * Check the address and alignment, and call func to do the actual
340 * load or store.
341 */
342 static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
343 unsigned long ea, int nb,
344 struct pt_regs *regs)
345 {
346 int err;
347 union {
348 double dbl;
349 unsigned long ul[2];
350 struct {
351 #ifdef __BIG_ENDIAN__
352 unsigned _pad_;
353 unsigned word;
354 #endif
355 #ifdef __LITTLE_ENDIAN__
356 unsigned word;
357 unsigned _pad_;
358 #endif
359 } single;
360 } data;
361 unsigned long ptr;
363 if (!address_ok(regs, ea, nb))
364 return -EFAULT;
365 if ((ea & 3) == 0)
366 return (*func)(rn, ea);
367 ptr = (unsigned long) &data.ul;
368 if (sizeof(unsigned long) == 8 || nb == 4) {
369 err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
370 if (nb == 4)
371 ptr = (unsigned long)&(data.single.word);
372 } else {
373 /* reading a double on 32-bit */
374 err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
375 if (!err)
376 err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
377 }
378 if (err)
379 return err;
380 return (*func)(rn, ptr);
381 }
383 static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
384 unsigned long ea, int nb,
385 struct pt_regs *regs)
386 {
387 int err;
388 union {
389 double dbl;
390 unsigned long ul[2];
391 struct {
392 #ifdef __BIG_ENDIAN__
393 unsigned _pad_;
394 unsigned word;
395 #endif
396 #ifdef __LITTLE_ENDIAN__
397 unsigned word;
398 unsigned _pad_;
399 #endif
400 } single;
401 } data;
402 unsigned long ptr;
404 if (!address_ok(regs, ea, nb))
405 return -EFAULT;
406 if ((ea & 3) == 0)
407 return (*func)(rn, ea);
408 ptr = (unsigned long) &data.ul[0];
409 if (sizeof(unsigned long) == 8 || nb == 4) {
410 if (nb == 4)
411 ptr = (unsigned long)&(data.single.word);
412 err = (*func)(rn, ptr);
413 if (err)
414 return err;
415 err = write_mem_unaligned(data.ul[0], ea, nb, regs);
416 } else {
417 /* writing a double on 32-bit */
418 err = (*func)(rn, ptr);
419 if (err)
420 return err;
421 err = write_mem_unaligned(data.ul[0], ea, 4, regs);
422 if (!err)
423 err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
424 }
425 return err;
426 }
427 #endif
429 #ifdef CONFIG_ALTIVEC
430 /* For Altivec/VMX, no need to worry about alignment */
431 static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
432 unsigned long ea, struct pt_regs *regs)
433 {
434 if (!address_ok(regs, ea & ~0xfUL, 16))
435 return -EFAULT;
436 return (*func)(rn, ea);
437 }
439 static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
440 unsigned long ea, struct pt_regs *regs)
441 {
442 if (!address_ok(regs, ea & ~0xfUL, 16))
443 return -EFAULT;
444 return (*func)(rn, ea);
445 }
446 #endif /* CONFIG_ALTIVEC */
448 #ifdef CONFIG_VSX
449 static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
450 unsigned long ea, struct pt_regs *regs)
451 {
452 int err;
453 unsigned long val[2];
455 if (!address_ok(regs, ea, 16))
456 return -EFAULT;
457 if ((ea & 3) == 0)
458 return (*func)(rn, ea);
459 err = read_mem_unaligned(&val[0], ea, 8, regs);
460 if (!err)
461 err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
462 if (!err)
463 err = (*func)(rn, (unsigned long) &val[0]);
464 return err;
465 }
467 static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
468 unsigned long ea, struct pt_regs *regs)
469 {
470 int err;
471 unsigned long val[2];
473 if (!address_ok(regs, ea, 16))
474 return -EFAULT;
475 if ((ea & 3) == 0)
476 return (*func)(rn, ea);
477 err = (*func)(rn, (unsigned long) &val[0]);
478 if (err)
479 return err;
480 err = write_mem_unaligned(val[0], ea, 8, regs);
481 if (!err)
482 err = write_mem_unaligned(val[1], ea + 8, 8, regs);
483 return err;
484 }
485 #endif /* CONFIG_VSX */
487 #define __put_user_asmx(x, addr, err, op, cr) \
488 __asm__ __volatile__( \
489 "1: " op " %2,0,%3\n" \
490 " mfcr %1\n" \
491 "2:\n" \
492 ".section .fixup,\"ax\"\n" \
493 "3: li %0,%4\n" \
494 " b 2b\n" \
495 ".previous\n" \
496 ".section __ex_table,\"a\"\n" \
497 PPC_LONG_ALIGN "\n" \
498 PPC_LONG "1b,3b\n" \
499 ".previous" \
500 : "=r" (err), "=r" (cr) \
501 : "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))
503 #define __get_user_asmx(x, addr, err, op) \
504 __asm__ __volatile__( \
505 "1: "op" %1,0,%2\n" \
506 "2:\n" \
507 ".section .fixup,\"ax\"\n" \
508 "3: li %0,%3\n" \
509 " b 2b\n" \
510 ".previous\n" \
511 ".section __ex_table,\"a\"\n" \
512 PPC_LONG_ALIGN "\n" \
513 PPC_LONG "1b,3b\n" \
514 ".previous" \
515 : "=r" (err), "=r" (x) \
516 : "r" (addr), "i" (-EFAULT), "0" (err))
518 #define __cacheop_user_asmx(addr, err, op) \
519 __asm__ __volatile__( \
520 "1: "op" 0,%1\n" \
521 "2:\n" \
522 ".section .fixup,\"ax\"\n" \
523 "3: li %0,%3\n" \
524 " b 2b\n" \
525 ".previous\n" \
526 ".section __ex_table,\"a\"\n" \
527 PPC_LONG_ALIGN "\n" \
528 PPC_LONG "1b,3b\n" \
529 ".previous" \
530 : "=r" (err) \
531 : "r" (addr), "i" (-EFAULT), "0" (err))
533 static void __kprobes set_cr0(struct pt_regs *regs, int rd)
534 {
535 long val = regs->gpr[rd];
537 regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
538 #ifdef __powerpc64__
539 if (!(regs->msr & MSR_64BIT))
540 val = (int) val;
541 #endif
542 if (val < 0)
543 regs->ccr |= 0x80000000;
544 else if (val > 0)
545 regs->ccr |= 0x40000000;
546 else
547 regs->ccr |= 0x20000000;
548 }
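
/*
 * Worked example: CR0 is the top nibble of regs->ccr, encoding LT|GT|EQ|SO.
 * A zero result with XER[SO] clear stores 0x2 (EQ); a negative result with
 * XER[SO] set stores 0x9 (LT|SO), the SO bit coming from
 * (regs->xer >> 3) & 0x10000000 above.
 */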
550 static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
551 unsigned long val1, unsigned long val2,
552 unsigned long carry_in)
553 {
554 unsigned long val = val1 + val2;
556 if (carry_in)
557 ++val;
558 regs->gpr[rd] = val;
559 #ifdef __powerpc64__
560 if (!(regs->msr & MSR_64BIT)) {
561 val = (unsigned int) val;
562 val1 = (unsigned int) val1;
563 }
564 #endif
565 if (val < val1 || (carry_in && val == val1))
566 regs->xer |= XER_CA;
567 else
568 regs->xer &= ~XER_CA;
569 }
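
/*
 * Worked example: "subfic rD,rA,0" (two's-complement negate) is emulated
 * below as add_with_carry(regs, rd, ~gpr[rA], 0, 1).  For gpr[rA] = 1:
 * val = ~1 + 0 + 1, which is neither less than val1 (~1) nor equal to it
 * with carry_in set, so XER[CA] ends up clear, matching hardware: 0 - 1
 * borrows and therefore produces no carry-out.
 */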
571 static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
572 int crfld)
573 {
574 unsigned int crval, shift;
576 crval = (regs->xer >> 31) & 1; /* get SO bit */
577 if (v1 < v2)
578 crval |= 8;
579 else if (v1 > v2)
580 crval |= 4;
581 else
582 crval |= 2;
583 shift = (7 - crfld) * 4;
584 regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
585 }
587 static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
588 unsigned long v2, int crfld)
589 {
590 unsigned int crval, shift;
592 crval = (regs->xer >> 31) & 1; /* get SO bit */
593 if (v1 < v2)
594 crval |= 8;
595 else if (v1 > v2)
596 crval |= 4;
597 else
598 crval |= 2;
599 shift = (7 - crfld) * 4;
600 regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
601 }
603 static int __kprobes trap_compare(long v1, long v2)
604 {
605 int ret = 0;
607 if (v1 < v2)
608 ret |= 0x10;
609 else if (v1 > v2)
610 ret |= 0x08;
611 else
612 ret |= 0x04;
613 if ((unsigned long)v1 < (unsigned long)v2)
614 ret |= 0x02;
615 else if ((unsigned long)v1 > (unsigned long)v2)
616 ret |= 0x01;
617 return ret;
618 }
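
/*
 * Worked example: the return value mirrors the five TO bits
 * (0x10 lt, 0x08 gt, 0x04 eq, 0x02 llt, 0x01 lgt).  trap_compare(-1, 1)
 * is 0x11: signed less-than plus unsigned greater-than, because
 * (unsigned long)-1 is the largest unsigned value.  "twgt" (TO = 0x08)
 * would therefore not trap on that pair.
 */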
620 /*
621 * Elements of 32-bit rotate and mask instructions.
622 */
623 #define MASK32(mb, me) ((0xffffffffUL >> (mb)) + \
624 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
625 #ifdef __powerpc64__
626 #define MASK64_L(mb) (~0UL >> (mb))
627 #define MASK64_R(me) ((signed long)-0x8000000000000000L >> (me))
628 #define MASK64(mb, me) (MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
629 #define DATA32(x) (((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
630 #else
631 #define DATA32(x) (x)
632 #endif
633 #define ROTATE(x, n) ((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
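
/*
 * Worked example: the masks use IBM bit numbering (bit 0 is the MSB), so
 * MASK32(16, 31) == 0x0000ffff and MASK32(0, 15) == 0xffff0000; mb > me
 * yields the wrap-around mask that rlwinm permits.  ROTATE() special-cases
 * n == 0 because shifting right by the full word width is undefined in C.
 */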
635 /*
636 * Decode an instruction, and execute it if that can be done just by
637 * modifying *regs (i.e. integer arithmetic and logical instructions,
638 * branches, and barrier instructions).
639 * Returns 1 if the instruction has been executed, or 0 if not.
640 * Sets *op to indicate what the instruction does.
641 */
642 int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
643 unsigned int instr)
644 {
645 unsigned int opcode, ra, rb, rd, spr, u;
646 unsigned long int imm;
647 unsigned long int val, val2;
648 unsigned int mb, me, sh;
649 long ival;
651 op->type = COMPUTE;
653 opcode = instr >> 26;
654 switch (opcode) {
655 case 16: /* bc */
656 op->type = BRANCH;
657 imm = (signed short)(instr & 0xfffc);
658 if ((instr & 2) == 0)
659 imm += regs->nip;
660 regs->nip += 4;
661 regs->nip = truncate_if_32bit(regs->msr, regs->nip);
662 if (instr & 1)
663 regs->link = regs->nip;
664 if (branch_taken(instr, regs))
665 regs->nip = truncate_if_32bit(regs->msr, imm);
666 return 1;
667 #ifdef CONFIG_PPC64
668 case 17: /* sc */
669 if ((instr & 0xfe2) == 2)
670 op->type = SYSCALL;
671 else
672 op->type = UNKNOWN;
673 return 0;
674 #endif
675 case 18: /* b */
676 op->type = BRANCH;
677 imm = instr & 0x03fffffc;
678 if (imm & 0x02000000)
679 imm -= 0x04000000;
680 if ((instr & 2) == 0)
681 imm += regs->nip;
682 if (instr & 1)
683 regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
684 imm = truncate_if_32bit(regs->msr, imm);
685 regs->nip = imm;
686 return 1;
687 case 19:
688 switch ((instr >> 1) & 0x3ff) {
689 case 0: /* mcrf */
690 rd = 7 - ((instr >> 23) & 0x7);
691 ra = 7 - ((instr >> 18) & 0x7);
692 rd *= 4;
693 ra *= 4;
694 val = (regs->ccr >> ra) & 0xf;
695 regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
696 goto instr_done;
698 case 16: /* bclr */
699 case 528: /* bcctr */
700 op->type = BRANCH;
701 imm = (instr & 0x400)? regs->ctr: regs->link;
702 regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
703 imm = truncate_if_32bit(regs->msr, imm);
704 if (instr & 1)
705 regs->link = regs->nip;
706 if (branch_taken(instr, regs))
707 regs->nip = imm;
708 return 1;
710 case 18: /* rfid, scary */
711 if (regs->msr & MSR_PR)
712 goto priv;
713 op->type = RFI;
714 return 0;
716 case 150: /* isync */
717 op->type = BARRIER;
718 isync();
719 goto instr_done;
721 case 33: /* crnor */
722 case 129: /* crandc */
723 case 193: /* crxor */
724 case 225: /* crnand */
725 case 257: /* crand */
726 case 289: /* creqv */
727 case 417: /* crorc */
728 case 449: /* cror */
729 ra = (instr >> 16) & 0x1f;
730 rb = (instr >> 11) & 0x1f;
731 rd = (instr >> 21) & 0x1f;
732 ra = (regs->ccr >> (31 - ra)) & 1;
733 rb = (regs->ccr >> (31 - rb)) & 1;
734 val = (instr >> (6 + ra * 2 + rb)) & 1;
735 regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
736 (val << (31 - rd));
737 goto instr_done;
738 }
739 break;
740 case 31:
741 switch ((instr >> 1) & 0x3ff) {
742 case 598: /* sync */
743 op->type = BARRIER;
744 #ifdef __powerpc64__
745 switch ((instr >> 21) & 3) {
746 case 1: /* lwsync */
747 asm volatile("lwsync" : : : "memory");
748 goto instr_done;
749 case 2: /* ptesync */
750 asm volatile("ptesync" : : : "memory");
751 goto instr_done;
752 }
753 #endif
754 mb();
755 goto instr_done;
757 case 854: /* eieio */
758 op->type = BARRIER;
759 eieio();
760 goto instr_done;
761 }
762 break;
763 }
765 /* Following cases refer to regs->gpr[], so we need all regs */
766 if (!FULL_REGS(regs))
767 return 0;
769 rd = (instr >> 21) & 0x1f;
770 ra = (instr >> 16) & 0x1f;
771 rb = (instr >> 11) & 0x1f;
773 switch (opcode) {
774 #ifdef __powerpc64__
775 case 2: /* tdi */
776 if (rd & trap_compare(regs->gpr[ra], (short) instr))
777 goto trap;
778 goto instr_done;
779 #endif
780 case 3: /* twi */
781 if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
782 goto trap;
783 goto instr_done;
785 case 7: /* mulli */
786 regs->gpr[rd] = regs->gpr[ra] * (short) instr;
787 goto instr_done;
789 case 8: /* subfic */
790 imm = (short) instr;
791 add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
792 goto instr_done;
794 case 10: /* cmpli */
795 imm = (unsigned short) instr;
796 val = regs->gpr[ra];
797 #ifdef __powerpc64__
798 if ((rd & 1) == 0)
799 val = (unsigned int) val;
800 #endif
801 do_cmp_unsigned(regs, val, imm, rd >> 2);
802 goto instr_done;
804 case 11: /* cmpi */
805 imm = (short) instr;
806 val = regs->gpr[ra];
807 #ifdef __powerpc64__
808 if ((rd & 1) == 0)
809 val = (int) val;
810 #endif
811 do_cmp_signed(regs, val, imm, rd >> 2);
812 goto instr_done;
814 case 12: /* addic */
815 imm = (short) instr;
816 add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
817 goto instr_done;
819 case 13: /* addic. */
820 imm = (short) instr;
821 add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
822 set_cr0(regs, rd);
823 goto instr_done;
825 case 14: /* addi */
826 imm = (short) instr;
827 if (ra)
828 imm += regs->gpr[ra];
829 regs->gpr[rd] = imm;
830 goto instr_done;
832 case 15: /* addis */
833 imm = ((short) instr) << 16;
834 if (ra)
835 imm += regs->gpr[ra];
836 regs->gpr[rd] = imm;
837 goto instr_done;
839 case 20: /* rlwimi */
840 mb = (instr >> 6) & 0x1f;
841 me = (instr >> 1) & 0x1f;
842 val = DATA32(regs->gpr[rd]);
843 imm = MASK32(mb, me);
844 regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
845 goto logical_done;
847 case 21: /* rlwinm */
848 mb = (instr >> 6) & 0x1f;
849 me = (instr >> 1) & 0x1f;
850 val = DATA32(regs->gpr[rd]);
851 regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
852 goto logical_done;
854 case 23: /* rlwnm */
855 mb = (instr >> 6) & 0x1f;
856 me = (instr >> 1) & 0x1f;
857 rb = regs->gpr[rb] & 0x1f;
858 val = DATA32(regs->gpr[rd]);
859 regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
860 goto logical_done;
862 case 24: /* ori */
863 imm = (unsigned short) instr;
864 regs->gpr[ra] = regs->gpr[rd] | imm;
865 goto instr_done;
867 case 25: /* oris */
868 imm = (unsigned short) instr;
869 regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
870 goto instr_done;
872 case 26: /* xori */
873 imm = (unsigned short) instr;
874 regs->gpr[ra] = regs->gpr[rd] ^ imm;
875 goto instr_done;
877 case 27: /* xoris */
878 imm = (unsigned short) instr;
879 regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
880 goto instr_done;
882 case 28: /* andi. */
883 imm = (unsigned short) instr;
884 regs->gpr[ra] = regs->gpr[rd] & imm;
885 set_cr0(regs, ra);
886 goto instr_done;
888 case 29: /* andis. */
889 imm = (unsigned short) instr;
890 regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
891 set_cr0(regs, ra);
892 goto instr_done;
894 #ifdef __powerpc64__
895 case 30: /* rld* */
896 mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
897 val = regs->gpr[rd];
898 if ((instr & 0x10) == 0) {
899 sh = rb | ((instr & 2) << 4);
900 val = ROTATE(val, sh);
901 switch ((instr >> 2) & 3) {
902 case 0: /* rldicl */
903 regs->gpr[ra] = val & MASK64_L(mb);
904 goto logical_done;
905 case 1: /* rldicr */
906 regs->gpr[ra] = val & MASK64_R(mb);
907 goto logical_done;
908 case 2: /* rldic */
909 regs->gpr[ra] = val & MASK64(mb, 63 - sh);
910 goto logical_done;
911 case 3: /* rldimi */
912 imm = MASK64(mb, 63 - sh);
913 regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
914 (val & imm);
915 goto logical_done;
916 }
917 } else {
918 sh = regs->gpr[rb] & 0x3f;
919 val = ROTATE(val, sh);
920 switch ((instr >> 1) & 7) {
921 case 0: /* rldcl */
922 regs->gpr[ra] = val & MASK64_L(mb);
923 goto logical_done;
924 case 1: /* rldcr */
925 regs->gpr[ra] = val & MASK64_R(mb);
926 goto logical_done;
927 }
928 }
929 #endif
930 break; /* illegal instruction */
932 case 31:
933 switch ((instr >> 1) & 0x3ff) {
934 case 4: /* tw */
935 if (rd == 0x1f ||
936 (rd & trap_compare((int)regs->gpr[ra],
937 (int)regs->gpr[rb])))
938 goto trap;
939 goto instr_done;
940 #ifdef __powerpc64__
941 case 68: /* td */
942 if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
943 goto trap;
944 goto instr_done;
945 #endif
946 case 83: /* mfmsr */
947 if (regs->msr & MSR_PR)
948 goto priv;
949 op->type = MFMSR;
950 op->reg = rd;
951 return 0;
952 case 146: /* mtmsr */
953 if (regs->msr & MSR_PR)
954 goto priv;
955 op->type = MTMSR;
956 op->reg = rd;
957 op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
958 return 0;
959 #ifdef CONFIG_PPC64
960 case 178: /* mtmsrd */
961 if (regs->msr & MSR_PR)
962 goto priv;
963 op->type = MTMSR;
964 op->reg = rd;
965 /* only MSR_EE and MSR_RI get changed if bit 15 set */
966 /* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
967 imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
968 op->val = imm;
969 return 0;
970 #endif
972 case 19: /* mfcr */
973 if ((instr >> 20) & 1) {
974 imm = 0xf0000000UL;
975 for (sh = 0; sh < 8; ++sh) {
976 if (instr & (0x80000 >> sh)) {
977 regs->gpr[rd] = regs->ccr & imm;
978 break;
979 }
980 imm >>= 4;
981 }
983 goto instr_done;
984 }
986 regs->gpr[rd] = regs->ccr;
987 regs->gpr[rd] &= 0xffffffffUL;
988 goto instr_done;
990 case 144: /* mtcrf */
991 imm = 0xf0000000UL;
992 val = regs->gpr[rd];
993 for (sh = 0; sh < 8; ++sh) {
994 if (instr & (0x80000 >> sh))
995 regs->ccr = (regs->ccr & ~imm) |
996 (val & imm);
997 imm >>= 4;
998 }
999 goto instr_done;
1001 case 339: /* mfspr */
1002 spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1003 switch (spr) {
1004 case SPRN_XER: /* mfxer */
1005 regs->gpr[rd] = regs->xer;
1006 regs->gpr[rd] &= 0xffffffffUL;
1007 goto instr_done;
1008 case SPRN_LR: /* mflr */
1009 regs->gpr[rd] = regs->link;
1010 goto instr_done;
1011 case SPRN_CTR: /* mfctr */
1012 regs->gpr[rd] = regs->ctr;
1013 goto instr_done;
1014 default:
1015 op->type = MFSPR;
1016 op->reg = rd;
1017 op->spr = spr;
1018 return 0;
1019 }
1020 break;
1022 case 467: /* mtspr */
1023 spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
1024 switch (spr) {
1025 case SPRN_XER: /* mtxer */
1026 regs->xer = (regs->gpr[rd] & 0xffffffffUL);
1027 goto instr_done;
1028 case SPRN_LR: /* mtlr */
1029 regs->link = regs->gpr[rd];
1030 goto instr_done;
1031 case SPRN_CTR: /* mtctr */
1032 regs->ctr = regs->gpr[rd];
1033 goto instr_done;
1034 default:
1035 op->type = MTSPR;
1036 op->val = regs->gpr[rd];
1037 op->spr = spr;
1038 return 0;
1039 }
1040 break;
1042 /*
1043 * Compare instructions
1044 */
1045 case 0: /* cmp */
1046 val = regs->gpr[ra];
1047 val2 = regs->gpr[rb];
1048 #ifdef __powerpc64__
1049 if ((rd & 1) == 0) {
1050 /* word (32-bit) compare */
1051 val = (int) val;
1052 val2 = (int) val2;
1053 }
1054 #endif
1055 do_cmp_signed(regs, val, val2, rd >> 2);
1056 goto instr_done;
1058 case 32: /* cmpl */
1059 val = regs->gpr[ra];
1060 val2 = regs->gpr[rb];
1061 #ifdef __powerpc64__
1062 if ((rd & 1) == 0) {
1063 /* word (32-bit) compare */
1064 val = (unsigned int) val;
1065 val2 = (unsigned int) val2;
1066 }
1067 #endif
1068 do_cmp_unsigned(regs, val, val2, rd >> 2);
1069 goto instr_done;
1072 * Arithmetic instructions
1074 case 8: /* subfc */
1075 add_with_carry(regs, rd, ~regs->gpr[ra],
1076 regs->gpr[rb], 1);
1077 goto arith_done;
1078 #ifdef __powerpc64__
1079 case 9: /* mulhdu */
1080 asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
1081 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1082 goto arith_done;
1083 #endif
1084 case 10: /* addc */
1085 add_with_carry(regs, rd, regs->gpr[ra],
1086 regs->gpr[rb], 0);
1087 goto arith_done;
1089 case 11: /* mulhwu */
1090 asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
1091 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1092 goto arith_done;
1094 case 40: /* subf */
1095 regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
1096 goto arith_done;
1097 #ifdef __powerpc64__
1098 case 73: /* mulhd */
1099 asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
1100 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1101 goto arith_done;
1102 #endif
1103 case 75: /* mulhw */
1104 asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
1105 "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
1106 goto arith_done;
1108 case 104: /* neg */
1109 regs->gpr[rd] = -regs->gpr[ra];
1110 goto arith_done;
1112 case 136: /* subfe */
1113 add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
1114 regs->xer & XER_CA);
1115 goto arith_done;
1117 case 138: /* adde */
1118 add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
1119 regs->xer & XER_CA);
1120 goto arith_done;
1122 case 200: /* subfze */
1123 add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
1124 regs->xer & XER_CA);
1125 goto arith_done;
1127 case 202: /* addze */
1128 add_with_carry(regs, rd, regs->gpr[ra], 0L,
1129 regs->xer & XER_CA);
1130 goto arith_done;
1132 case 232: /* subfme */
1133 add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
1134 regs->xer & XER_CA);
1135 goto arith_done;
1136 #ifdef __powerpc64__
1137 case 233: /* mulld */
1138 regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
1139 goto arith_done;
1140 #endif
1141 case 234: /* addme */
1142 add_with_carry(regs, rd, regs->gpr[ra], -1L,
1143 regs->xer & XER_CA);
1144 goto arith_done;
1146 case 235: /* mullw */
1147 regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
1148 (unsigned int) regs->gpr[rb];
1149 goto arith_done;
1151 case 266: /* add */
1152 regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
1153 goto arith_done;
1154 #ifdef __powerpc64__
1155 case 457: /* divdu */
1156 regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
1157 goto arith_done;
1158 #endif
1159 case 459: /* divwu */
1160 regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
1161 (unsigned int) regs->gpr[rb];
1162 goto arith_done;
1163 #ifdef __powerpc64__
1164 case 489: /* divd */
1165 regs->gpr[rd] = (long int) regs->gpr[ra] /
1166 (long int) regs->gpr[rb];
1167 goto arith_done;
1168 #endif
1169 case 491: /* divw */
1170 regs->gpr[rd] = (int) regs->gpr[ra] /
1171 (int) regs->gpr[rb];
1172 goto arith_done;
1175 /*
1176 * Logical instructions
1177 */
1178 case 26: /* cntlzw */
1179 asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
1180 "r" (regs->gpr[rd]));
1181 goto logical_done;
1182 #ifdef __powerpc64__
1183 case 58: /* cntlzd */
1184 asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
1185 "r" (regs->gpr[rd]));
1186 goto logical_done;
1187 #endif
1188 case 28: /* and */
1189 regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
1190 goto logical_done;
1192 case 60: /* andc */
1193 regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
1194 goto logical_done;
1196 case 124: /* nor */
1197 regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
1198 goto logical_done;
1200 case 284: /* eqv */
1201 regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
1202 goto logical_done;
1204 case 316: /* xor */
1205 regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
1206 goto logical_done;
1208 case 412: /* orc */
1209 regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
1210 goto logical_done;
1212 case 444: /* or */
1213 regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
1214 goto logical_done;
1216 case 476: /* nand */
1217 regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
1218 goto logical_done;
1220 case 922: /* extsh */
1221 regs->gpr[ra] = (signed short) regs->gpr[rd];
1222 goto logical_done;
1224 case 954: /* extsb */
1225 regs->gpr[ra] = (signed char) regs->gpr[rd];
1226 goto logical_done;
1227 #ifdef __powerpc64__
1228 case 986: /* extsw */
1229 regs->gpr[ra] = (signed int) regs->gpr[rd];
1230 goto logical_done;
1231 #endif
1233 /*
1234 * Shift instructions
1235 */
1236 case 24: /* slw */
1237 sh = regs->gpr[rb] & 0x3f;
1238 if (sh < 32)
1239 regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
1240 else
1241 regs->gpr[ra] = 0;
1242 goto logical_done;
1244 case 536: /* srw */
1245 sh = regs->gpr[rb] & 0x3f;
1246 if (sh < 32)
1247 regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
1248 else
1249 regs->gpr[ra] = 0;
1250 goto logical_done;
1252 case 792: /* sraw */
1253 sh = regs->gpr[rb] & 0x3f;
1254 ival = (signed int) regs->gpr[rd];
1255 regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
1256 if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
1257 regs->xer |= XER_CA;
1258 else
1259 regs->xer &= ~XER_CA;
1260 goto logical_done;
1262 case 824: /* srawi */
1263 sh = rb;
1264 ival = (signed int) regs->gpr[rd];
1265 regs->gpr[ra] = ival >> sh;
1266 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1267 regs->xer |= XER_CA;
1268 else
1269 regs->xer &= ~XER_CA;
1270 goto logical_done;
1272 #ifdef __powerpc64__
1273 case 27: /* sld */
1274 sh = regs->gpr[rb] & 0x7f;
1275 if (sh < 64)
1276 regs->gpr[ra] = regs->gpr[rd] << sh;
1277 else
1278 regs->gpr[ra] = 0;
1279 goto logical_done;
1281 case 539: /* srd */
1282 sh = regs->gpr[rb] & 0x7f;
1283 if (sh < 64)
1284 regs->gpr[ra] = regs->gpr[rd] >> sh;
1285 else
1286 regs->gpr[ra] = 0;
1287 goto logical_done;
1289 case 794: /* srad */
1290 sh = regs->gpr[rb] & 0x7f;
1291 ival = (signed long int) regs->gpr[rd];
1292 regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
1293 if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
1294 regs->xer |= XER_CA;
1295 else
1296 regs->xer &= ~XER_CA;
1297 goto logical_done;
1299 case 826: /* sradi with sh_5 = 0 */
1300 case 827: /* sradi with sh_5 = 1 */
1301 sh = rb | ((instr & 2) << 4);
1302 ival = (signed long int) regs->gpr[rd];
1303 regs->gpr[ra] = ival >> sh;
1304 if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
1305 regs->xer |= XER_CA;
1306 else
1307 regs->xer &= ~XER_CA;
1308 goto logical_done;
1309 #endif /* __powerpc64__ */
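
/*
 * Worked example for the carry logic above: "srawi rA,rS,1" with
 * gpr[rS] = -3 yields gpr[rA] = -2 and sets XER[CA], since the result is
 * negative and a 1-bit was shifted out.  CA is what lets a following
 * "addze" round the shift toward zero for a signed divide by 2.
 */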
1311 /*
1312 * Cache instructions
1313 */
1314 case 54: /* dcbst */
1315 op->type = MKOP(CACHEOP, DCBST, 0);
1316 op->ea = xform_ea(instr, regs);
1317 return 0;
1319 case 86: /* dcbf */
1320 op->type = MKOP(CACHEOP, DCBF, 0);
1321 op->ea = xform_ea(instr, regs);
1322 return 0;
1324 case 246: /* dcbtst */
1325 op->type = MKOP(CACHEOP, DCBTST, 0);
1326 op->ea = xform_ea(instr, regs);
1327 op->reg = rd;
1328 return 0;
1330 case 278: /* dcbt */
1331 op->type = MKOP(CACHEOP, DCBT, 0);
1332 op->ea = xform_ea(instr, regs);
1333 op->reg = rd;
1334 return 0;
1336 case 982: /* icbi */
1337 op->type = MKOP(CACHEOP, ICBI, 0);
1338 op->ea = xform_ea(instr, regs);
1339 return 0;
1340 }
1341 break;
1342 }
1344 /*
1345 * Loads and stores.
1346 */
1347 op->type = UNKNOWN;
1348 op->update_reg = ra;
1349 op->reg = rd;
1350 op->val = regs->gpr[rd];
1351 u = (instr >> 20) & UPDATE;
1353 switch (opcode) {
1354 case 31:
1355 u = instr & UPDATE;
1356 op->ea = xform_ea(instr, regs);
1357 switch ((instr >> 1) & 0x3ff) {
1358 case 20: /* lwarx */
1359 op->type = MKOP(LARX, 0, 4);
1360 break;
1362 case 150: /* stwcx. */
1363 op->type = MKOP(STCX, 0, 4);
1364 break;
1366 #ifdef __powerpc64__
1367 case 84: /* ldarx */
1368 op->type = MKOP(LARX, 0, 8);
1369 break;
1371 case 214: /* stdcx. */
1372 op->type = MKOP(STCX, 0, 8);
1373 break;
1375 case 21: /* ldx */
1376 case 53: /* ldux */
1377 op->type = MKOP(LOAD, u, 8);
1378 break;
1379 #endif
1381 case 23: /* lwzx */
1382 case 55: /* lwzux */
1383 op->type = MKOP(LOAD, u, 4);
1384 break;
1386 case 87: /* lbzx */
1387 case 119: /* lbzux */
1388 op->type = MKOP(LOAD, u, 1);
1389 break;
1391 #ifdef CONFIG_ALTIVEC
1392 case 103: /* lvx */
1393 case 359: /* lvxl */
1394 if (!(regs->msr & MSR_VEC))
1395 goto vecunavail;
1396 op->type = MKOP(LOAD_VMX, 0, 16);
1397 break;
1399 case 231: /* stvx */
1400 case 487: /* stvxl */
1401 if (!(regs->msr & MSR_VEC))
1402 goto vecunavail;
1403 op->type = MKOP(STORE_VMX, 0, 16);
1404 break;
1405 #endif /* CONFIG_ALTIVEC */
1407 #ifdef __powerpc64__
1408 case 149: /* stdx */
1409 case 181: /* stdux */
1410 op->type = MKOP(STORE, u, 8);
1411 break;
1412 #endif
1414 case 151: /* stwx */
1415 case 183: /* stwux */
1416 op->type = MKOP(STORE, u, 4);
1417 break;
1419 case 215: /* stbx */
1420 case 247: /* stbux */
1421 op->type = MKOP(STORE, u, 1);
1422 break;
1424 case 279: /* lhzx */
1425 case 311: /* lhzux */
1426 op->type = MKOP(LOAD, u, 2);
1427 break;
1429 #ifdef __powerpc64__
1430 case 341: /* lwax */
1431 case 373: /* lwaux */
1432 op->type = MKOP(LOAD, SIGNEXT | u, 4);
1433 break;
1434 #endif
1436 case 343: /* lhax */
1437 case 375: /* lhaux */
1438 op->type = MKOP(LOAD, SIGNEXT | u, 2);
1439 break;
1441 case 407: /* sthx */
1442 case 439: /* sthux */
1443 op->type = MKOP(STORE, u, 2);
1444 break;
1446 #ifdef __powerpc64__
1447 case 532: /* ldbrx */
1448 op->type = MKOP(LOAD, BYTEREV, 8);
1449 break;
1451 #endif
1452 case 533: /* lswx */
1453 op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
1454 break;
1456 case 534: /* lwbrx */
1457 op->type = MKOP(LOAD, BYTEREV, 4);
1458 break;
1460 case 597: /* lswi */
1461 if (rb == 0)
1462 rb = 32; /* # bytes to load */
1463 op->type = MKOP(LOAD_MULTI, 0, rb);
1464 op->ea = 0;
1465 if (ra)
1466 op->ea = truncate_if_32bit(regs->msr,
1467 regs->gpr[ra]);
1468 break;
1470 #ifdef CONFIG_PPC_FPU
1471 case 535: /* lfsx */
1472 case 567: /* lfsux */
1473 if (!(regs->msr & MSR_FP))
1474 goto fpunavail;
1475 op->type = MKOP(LOAD_FP, u, 4);
1476 break;
1478 case 599: /* lfdx */
1479 case 631: /* lfdux */
1480 if (!(regs->msr & MSR_FP))
1481 goto fpunavail;
1482 op->type = MKOP(LOAD_FP, u, 8);
1483 break;
1485 case 663: /* stfsx */
1486 case 695: /* stfsux */
1487 if (!(regs->msr & MSR_FP))
1488 goto fpunavail;
1489 op->type = MKOP(STORE_FP, u, 4);
1490 break;
1492 case 727: /* stfdx */
1493 case 759: /* stfdux */
1494 if (!(regs->msr & MSR_FP))
1495 goto fpunavail;
1496 op->type = MKOP(STORE_FP, u, 8);
1497 break;
1498 #endif
1500 #ifdef __powerpc64__
1501 case 660: /* stdbrx */
1502 op->type = MKOP(STORE, BYTEREV, 8);
1503 op->val = byterev_8(regs->gpr[rd]);
1504 break;
1506 #endif
1507 case 661: /* stswx */
1508 op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
1509 break;
1511 case 662: /* stwbrx */
1512 op->type = MKOP(STORE, BYTEREV, 4);
1513 op->val = byterev_4(regs->gpr[rd]);
1514 break;
1516 case 725: /* stswi */
1517 if (rb == 0)
1518 rb = 32; /* # bytes to store */
1519 op->type = MKOP(STORE_MULTI, 0, rb);
1520 op->ea = 0;
1521 if (ra)
1522 op->ea = truncate_if_32bit(regs->msr,
1523 regs->gpr[ra]);
1524 break;
1526 case 790: /* lhbrx */
1527 op->type = MKOP(LOAD, BYTEREV, 2);
1528 break;
1530 case 918: /* sthbrx */
1531 op->type = MKOP(STORE, BYTEREV, 2);
1532 op->val = byterev_2(regs->gpr[rd]);
1533 break;
1535 #ifdef CONFIG_VSX
1536 case 844: /* lxvd2x */
1537 case 876: /* lxvd2ux */
1538 if (!(regs->msr & MSR_VSX))
1539 goto vsxunavail;
1540 op->reg = rd | ((instr & 1) << 5);
1541 op->type = MKOP(LOAD_VSX, u, 16);
1542 break;
1544 case 972: /* stxvd2x */
1545 case 1004: /* stxvd2ux */
1546 if (!(regs->msr & MSR_VSX))
1547 goto vsxunavail;
1548 op->reg = rd | ((instr & 1) << 5);
1549 op->type = MKOP(STORE_VSX, u, 16);
1550 break;
1552 #endif /* CONFIG_VSX */
1553 }
1554 break;
1556 case 32: /* lwz */
1557 case 33: /* lwzu */
1558 op->type = MKOP(LOAD, u, 4);
1559 op->ea = dform_ea(instr, regs);
1560 break;
1562 case 34: /* lbz */
1563 case 35: /* lbzu */
1564 op->type = MKOP(LOAD, u, 1);
1565 op->ea = dform_ea(instr, regs);
1566 break;
1568 case 36: /* stw */
1569 case 37: /* stwu */
1570 op->type = MKOP(STORE, u, 4);
1571 op->ea = dform_ea(instr, regs);
1572 break;
1574 case 38: /* stb */
1575 case 39: /* stbu */
1576 op->type = MKOP(STORE, u, 1);
1577 op->ea = dform_ea(instr, regs);
1578 break;
1580 case 40: /* lhz */
1581 case 41: /* lhzu */
1582 op->type = MKOP(LOAD, u, 2);
1583 op->ea = dform_ea(instr, regs);
1584 break;
1586 case 42: /* lha */
1587 case 43: /* lhau */
1588 op->type = MKOP(LOAD, SIGNEXT | u, 2);
1589 op->ea = dform_ea(instr, regs);
1590 break;
1592 case 44: /* sth */
1593 case 45: /* sthu */
1594 op->type = MKOP(STORE, u, 2);
1595 op->ea = dform_ea(instr, regs);
1596 break;
1598 case 46: /* lmw */
1599 if (ra >= rd)
1600 break; /* invalid form, ra in range to load */
1601 op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
1602 op->ea = dform_ea(instr, regs);
1603 break;
1605 case 47: /* stmw */
1606 op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
1607 op->ea = dform_ea(instr, regs);
1608 break;
1610 #ifdef CONFIG_PPC_FPU
1611 case 48: /* lfs */
1612 case 49: /* lfsu */
1613 if (!(regs->msr & MSR_FP))
1614 goto fpunavail;
1615 op->type = MKOP(LOAD_FP, u, 4);
1616 op->ea = dform_ea(instr, regs);
1617 break;
1619 case 50: /* lfd */
1620 case 51: /* lfdu */
1621 if (!(regs->msr & MSR_FP))
1622 goto fpunavail;
1623 op->type = MKOP(LOAD_FP, u, 8);
1624 op->ea = dform_ea(instr, regs);
1625 break;
1627 case 52: /* stfs */
1628 case 53: /* stfsu */
1629 if (!(regs->msr & MSR_FP))
1630 goto fpunavail;
1631 op->type = MKOP(STORE_FP, u, 4);
1632 op->ea = dform_ea(instr, regs);
1633 break;
1635 case 54: /* stfd */
1636 case 55: /* stfdu */
1637 if (!(regs->msr & MSR_FP))
1638 goto fpunavail;
1639 op->type = MKOP(STORE_FP, u, 8);
1640 op->ea = dform_ea(instr, regs);
1641 break;
1642 #endif
1644 #ifdef __powerpc64__
1645 case 58: /* ld[u], lwa */
1646 op->ea = dsform_ea(instr, regs);
1647 switch (instr & 3) {
1648 case 0: /* ld */
1649 op->type = MKOP(LOAD, 0, 8);
1650 break;
1651 case 1: /* ldu */
1652 op->type = MKOP(LOAD, UPDATE, 8);
1653 break;
1654 case 2: /* lwa */
1655 op->type = MKOP(LOAD, SIGNEXT, 4);
1656 break;
1657 }
1658 break;
1660 case 62: /* std[u] */
1661 op->ea = dsform_ea(instr, regs);
1662 switch (instr & 3) {
1663 case 0: /* std */
1664 op->type = MKOP(STORE, 0, 8);
1665 break;
1666 case 1: /* stdu */
1667 op->type = MKOP(STORE, UPDATE, 8);
1668 break;
1669 }
1670 break;
1671 #endif /* __powerpc64__ */
1672 }
1674 return 0;
1676 logical_done:
1677 if (instr & 1)
1678 set_cr0(regs, ra);
1679 goto instr_done;
1681 arith_done:
1682 if (instr & 1)
1683 set_cr0(regs, rd);
1685 instr_done:
1686 regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
1687 return 1;
1689 priv:
1690 op->type = INTERRUPT | 0x700;
1691 op->val = SRR1_PROGPRIV;
1692 return 0;
1694 trap:
1695 op->type = INTERRUPT | 0x700;
1696 op->val = SRR1_PROGTRAP;
1697 return 0;
1699 #ifdef CONFIG_PPC_FPU
1700 fpunavail:
1701 op->type = INTERRUPT | 0x800;
1702 return 0;
1703 #endif
1705 #ifdef CONFIG_ALTIVEC
1706 vecunavail:
1707 op->type = INTERRUPT | 0xf20;
1708 return 0;
1709 #endif
1711 #ifdef CONFIG_VSX
1712 vsxunavail:
1713 op->type = INTERRUPT | 0xf40;
1714 return 0;
1715 #endif
1716 }
1717 EXPORT_SYMBOL_GPL(analyse_instr);
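
/*
 * Usage sketch for analyse_instr() (the helper and its name are
 * hypothetical): computational instructions are executed against *regs
 * directly and 1 is returned; for loads, stores and other side-effecting
 * instructions, 0 is returned and *op describes the access instead.
 */
static inline int __maybe_unused classify_insn_example(struct pt_regs *regs,
						       unsigned int instr)
{
	struct instruction_op op;

	if (analyse_instr(&op, regs, instr) == 1)
		return 0;		/* executed by modifying *regs */
	return GETSIZE(op.type);	/* e.g. byte count for LOAD/STORE */
}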
1719 /*
1720 * For PPC32 we always use stwu with r1 to change the stack pointer.
1721 * So this emulated store may corrupt the exception frame; we provide
1722 * an exception frame trampoline instead, pushed below the kprobed
1723 * function's stack, and only update gpr[1] here without emulating
1724 * the real store operation. The real store is done safely in the
1725 * exception return code, which checks this flag.
1726 */
1727 static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs)
1728 {
1729 #ifdef CONFIG_PPC32
1730 /*
1731 * Check if we will touch kernel stack overflow
1732 */
1733 if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
1734 printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
1735 return -EINVAL;
1736 }
1737 #endif /* CONFIG_PPC32 */
1738 /*
1739 * Check if we already set since that means we'll
1740 * lose the previous value.
1741 */
1742 WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
1743 set_thread_flag(TIF_EMULATE_STACK_STORE);
1744 return 0;
1745 }
1747 static __kprobes void do_signext(unsigned long *valp, int size)
1748 {
1749 switch (size) {
1750 case 2:
1751 *valp = (signed short) *valp;
1752 break;
1753 case 4:
1754 *valp = (signed int) *valp;
1755 break;
1756 }
1757 }
1759 static __kprobes void do_byterev(unsigned long *valp, int size)
1760 {
1761 switch (size) {
1762 case 2:
1763 *valp = byterev_2(*valp);
1764 break;
1765 case 4:
1766 *valp = byterev_4(*valp);
1767 break;
1768 #ifdef __powerpc64__
1769 case 8:
1770 *valp = byterev_8(*valp);
1771 break;
1772 #endif
1773 }
1774 }
1776 /*
1777 * Emulate instructions that cause a transfer of control,
1778 * loads and stores, and a few other instructions.
1779 * Returns 1 if the step was emulated, 0 if not,
1780 * or -1 if the instruction is one that should not be stepped,
1781 * such as an rfid, or a mtmsrd that would clear MSR_RI.
1782 */
1783 int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1784 {
1785 struct instruction_op op;
1786 int r, err, size;
1787 unsigned long val;
1788 unsigned int cr;
1789 int i, rd, nb;
1791 r = analyse_instr(&op, regs, instr);
1792 if (r != 0)
1793 return r;
1795 err = 0;
1796 size = GETSIZE(op.type);
1797 switch (op.type & INSTR_TYPE_MASK) {
1798 case CACHEOP:
1799 if (!address_ok(regs, op.ea, 8))
1800 return 0;
1801 switch (op.type & CACHEOP_MASK) {
1802 case DCBST:
1803 __cacheop_user_asmx(op.ea, err, "dcbst");
1804 break;
1805 case DCBF:
1806 __cacheop_user_asmx(op.ea, err, "dcbf");
1807 break;
1808 case DCBTST:
1809 if (op.reg == 0)
1810 prefetchw((void *) op.ea);
1811 break;
1812 case DCBT:
1813 if (op.reg == 0)
1814 prefetch((void *) op.ea);
1815 break;
1816 case ICBI:
1817 __cacheop_user_asmx(op.ea, err, "icbi");
1818 break;
1819 }
1820 if (err)
1821 return 0;
1822 goto instr_done;
1824 case LARX:
1825 if (op.ea & (size - 1))
1826 break; /* can't handle misaligned */
1827 err = -EFAULT;
1828 if (!address_ok(regs, op.ea, size))
1829 goto ldst_done;
1830 err = 0;
1831 switch (size) {
1832 case 4:
1833 __get_user_asmx(val, op.ea, err, "lwarx");
1834 break;
1835 #ifdef __powerpc64__
1836 case 8:
1837 __get_user_asmx(val, op.ea, err, "ldarx");
1838 break;
1839 #endif
1840 default:
1841 return 0;
1842 }
1843 if (!err)
1844 regs->gpr[op.reg] = val;
1845 goto ldst_done;
1847 case STCX:
1848 if (op.ea & (size - 1))
1849 break; /* can't handle misaligned */
1850 err = -EFAULT;
1851 if (!address_ok(regs, op.ea, size))
1852 goto ldst_done;
1853 err = 0;
1854 switch (size) {
1855 case 4:
1856 __put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
1857 break;
1858 #ifdef __powerpc64__
1859 case 8:
1860 __put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
1861 break;
1862 #endif
1863 default:
1864 return 0;
1865 }
1866 if (!err)
1867 regs->ccr = (regs->ccr & 0x0fffffff) |
1868 (cr & 0xe0000000) |
1869 ((regs->xer >> 3) & 0x10000000);
1870 goto ldst_done;
1872 case LOAD:
1873 err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
1874 if (!err) {
1875 if (op.type & SIGNEXT)
1876 do_signext(&regs->gpr[op.reg], size);
1877 if (op.type & BYTEREV)
1878 do_byterev(&regs->gpr[op.reg], size);
1879 }
1880 goto ldst_done;
1882 #ifdef CONFIG_PPC_FPU
1883 case LOAD_FP:
1884 if (size == 4)
1885 err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
1886 else
1887 err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
1888 goto ldst_done;
1889 #endif
1890 #ifdef CONFIG_ALTIVEC
1891 case LOAD_VMX:
1892 err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
1893 goto ldst_done;
1894 #endif
1895 #ifdef CONFIG_VSX
1896 case LOAD_VSX:
1897 err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
1898 goto ldst_done;
1899 #endif
1900 case LOAD_MULTI:
1901 if (regs->msr & MSR_LE)
1902 return 0;
1903 rd = op.reg;
1904 for (i = 0; i < size; i += 4) {
1905 nb = size - i;
1906 if (nb > 4)
1907 nb = 4;
1908 err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
1909 if (err)
1910 return 0;
1911 if (nb < 4) /* left-justify last bytes */
1912 regs->gpr[rd] <<= 32 - 8 * nb;
1913 op.ea += 4;
1914 ++rd;
1915 }
1916 goto instr_done;
1918 case STORE:
1919 if ((op.type & UPDATE) && size == sizeof(long) &&
1920 op.reg == 1 && op.update_reg == 1 &&
1921 !(regs->msr & MSR_PR) &&
1922 op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
1923 err = handle_stack_update(op.ea, regs);
1924 goto ldst_done;
1925 }
1926 err = write_mem(op.val, op.ea, size, regs);
1927 goto ldst_done;
1929 #ifdef CONFIG_PPC_FPU
1930 case STORE_FP:
1931 if (size == 4)
1932 err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
1933 else
1934 err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
1935 goto ldst_done;
1936 #endif
1937 #ifdef CONFIG_ALTIVEC
1938 case STORE_VMX:
1939 err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
1940 goto ldst_done;
1941 #endif
1942 #ifdef CONFIG_VSX
1943 case STORE_VSX:
1944 err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
1945 goto ldst_done;
1946 #endif
1947 case STORE_MULTI:
1948 if (regs->msr & MSR_LE)
1949 return 0;
1950 rd = op.reg;
1951 for (i = 0; i < size; i += 4) {
1952 val = regs->gpr[rd];
1953 nb = size - i;
1954 if (nb > 4)
1955 nb = 4;
1956 else
1957 val >>= 32 - 8 * nb;
1958 err = write_mem(val, op.ea, nb, regs);
1959 if (err)
1960 return 0;
1961 op.ea += 4;
1962 ++rd;
1963 }
1964 goto instr_done;
1966 case MFMSR:
1967 regs->gpr[op.reg] = regs->msr & MSR_MASK;
1968 goto instr_done;
1970 case MTMSR:
1971 val = regs->gpr[op.reg];
1972 if ((val & MSR_RI) == 0)
1973 /* can't step mtmsr[d] that would clear MSR_RI */
1974 return -1;
1975 /* here op.val is the mask of bits to change */
1976 regs->msr = (regs->msr & ~op.val) | (val & op.val);
1977 goto instr_done;
1979 #ifdef CONFIG_PPC64
1980 case SYSCALL: /* sc */
1981 /*
1982 * N.B. this uses knowledge about how the syscall
1983 * entry code works. If that is changed, this will
1984 * need to be changed also.
1985 */
1986 if (regs->gpr[0] == 0x1ebe &&
1987 cpu_has_feature(CPU_FTR_REAL_LE)) {
1988 regs->msr ^= MSR_LE;
1989 goto instr_done;
1990 }
1991 regs->gpr[9] = regs->gpr[13];
1992 regs->gpr[10] = MSR_KERNEL;
1993 regs->gpr[11] = regs->nip + 4;
1994 regs->gpr[12] = regs->msr & MSR_MASK;
1995 regs->gpr[13] = (unsigned long) get_paca();
1996 regs->nip = (unsigned long) &system_call_common;
1997 regs->msr = MSR_KERNEL;
1998 return 1;
2000 case RFI:
2001 return -1;
2002 #endif
2003 }
2004 return 0;
2006 ldst_done:
2007 if (err)
2008 return 0;
2009 if (op.type & UPDATE)
2010 regs->gpr[op.update_reg] = op.ea;
2012 instr_done:
2013 regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
2014 return 1;
2015 }
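
/*
 * Usage sketch (a hypothetical caller, in the style of the kprobes
 * single-step path): emulate_step() returning 1 means regs->nip has
 * already been advanced; 0 means the caller must hardware-single-step;
 * -1 means the instruction (rfid, or an mtmsr[d] that would clear
 * MSR_RI) must not be stepped at all.
 */
static int __maybe_unused try_emulate_example(struct pt_regs *regs,
					      unsigned int insn)
{
	int ret = emulate_step(regs, insn);

	if (ret > 0)
		return 0;	/* emulated, nothing more to do */
	if (ret < 0)
		return -EACCES;	/* refuse to probe/step this instruction */
	return -EAGAIN;		/* fall back to hardware single-step */
}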