arch/powerpc/lib/sstep.c
/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cputable.h>
extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
#endif
/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}
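/*
 * Illustrative example (not part of the build): with MSR_64BIT clear,
 * an effective address such as 0x123456789 is truncated to 0x23456789,
 * matching the 32-bit wraparound a real CPU would perform in 32-bit mode.
 */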
/*
 * Determine whether a conditional branch instruction would branch.
 */
static int __kprobes branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
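/*
 * Worked example (illustrative): for "bdnz", BO = 0b10000, so bit 2
 * (bo & 4) is clear and CTR is decremented; the branch is taken while
 * the decremented CTR is non-zero.  For "blt", BO = 0b01100, so only
 * the CR test runs: BI selects the LT bit and (bo >> 3) & 1 = 1 asks
 * for that bit to be set.
 */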
static long __kprobes address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}
/*
 * Calculate effective address for a D-form instruction
 */
static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
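/*
 * Example (illustrative): for "lwz r3,-4(r1)" the low 16 bits encode
 * the displacement 0xfffc, which sign-extends to -4, and RA = 1, so
 * ea = regs->gpr[1] - 4.
 */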
#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64 */
/*
 * Calculate effective address for an X-form instruction
 */
static unsigned long __kprobes xform_ea(unsigned int instr,
					struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
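/*
 * Worked example (illustrative): max_align(0x1006) computes
 * (0x1006 | 8) & -(0x1006 | 8) = 0x100e & -0x100e = 2, so a
 * 2-byte-aligned address is accessed at most 2 bytes at a time;
 * max_align(0x1008) yields 8.
 */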
static inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}
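/*
 * Example (illustrative): byterev_4(0x12345678) = 0x78563412, the
 * swap needed when emulating lwbrx/stwbrx or when a little-endian
 * kernel assembles a value it read one byte at a time.
 */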
#ifdef __powerpc64__
static inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif
static int __kprobes read_mem_aligned(unsigned long *dest, unsigned long ea,
				      int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}
static int __kprobes read_mem_unaligned(unsigned long *dest, unsigned long ea,
					int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;
#ifdef __LITTLE_ENDIAN__
	int len = nb; /* save a copy of the length for byte reversal */
#endif

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
#ifdef __LITTLE_ENDIAN__
	switch (len) {
	case 2:
		*dest = byterev_2(x);
		break;
	case 4:
		*dest = byterev_4(x);
		break;
#ifdef __powerpc64__
	case 8:
		*dest = byterev_8(x);
		break;
#endif
	}
#endif
#ifdef __BIG_ENDIAN__
	*dest = x;
#endif
	return 0;
}
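/*
 * Worked example (illustrative, big-endian): reading 4 bytes from
 * ea = 0x1002 uses max_align(0x1002) = 2, so two 2-byte aligned reads
 * are done and combined as x = (b0 << 16) + b1.  On a little-endian
 * kernel the loop reads single bytes and the final byterev_*() call
 * puts them in the right order.
 */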
/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes read_mem(unsigned long *dest, unsigned long ea, int nb,
			      struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}
static int __kprobes write_mem_aligned(unsigned long val, unsigned long ea,
				       int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}
static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
					 int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

#ifdef __LITTLE_ENDIAN__
	switch (nb) {
	case 2:
		val = byterev_2(val);
		break;
	case 4:
		val = byterev_4(val);
		break;
#ifdef __powerpc64__
	case 8:
		val = byterev_8(val);
		break;
#endif
	}
#endif
	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}
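/*
 * Note (illustrative): val >> (nb - c) * 8 peels off the most
 * significant remaining bytes first, so for val = 0x11223344 at
 * ea = 0x1002 on big-endian the two writes are 0x1122 then 0x3344.
 */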
/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb,
			       struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}
#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int __kprobes do_fp_load(int rn, int (*func)(int, unsigned long),
				unsigned long ea, int nb,
				struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul;
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}
static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, int nb,
				 struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
	}
	return err;
}
#endif
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static int __kprobes do_vec_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static int __kprobes do_vec_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
static int __kprobes do_vsx_load(int rn, int (*func)(int, unsigned long),
				 unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static int __kprobes do_vsx_store(int rn, int (*func)(int, unsigned long),
				  unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
			PPC_LONG_ALIGN "\n"		\
			PPC_LONG "1b,3b\n"		\
		".previous"				\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))
static void __kprobes set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}
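/*
 * Example (illustrative): CR0 is set as if comparing rd against 0:
 * 0x80000000 is LT, 0x40000000 is GT, 0x20000000 is EQ, and
 * 0x10000000 (SO) is copied from XER; so rd = -5 with XER.SO clear
 * yields CR0 = 0b1000.
 */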
static void __kprobes add_with_carry(struct pt_regs *regs, int rd,
				     unsigned long val1, unsigned long val2,
				     unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}
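/*
 * Carry detection example (illustrative): for "addc" of
 * 0xffffffffffffffff + 1, val wraps to 0, val < val1 holds and CA is
 * set; the carry_in term only matters in the val == val1 corner case,
 * e.g. "adde" with val2 = 0xffffffffffffffff and carry in.
 */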
static void __kprobes do_cmp_signed(struct pt_regs *regs, long v1, long v2,
				    int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
static void __kprobes do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
				      unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
static int __kprobes trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}
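/*
 * The result maps onto the TO field of tw/twi/td/tdi (illustrative):
 * 0x10 = signed less than, 0x08 = signed greater than, 0x04 = equal,
 * 0x02/0x01 = unsigned less/greater.  A "twgti rA,SI" (TO = 0x08)
 * therefore traps when trap_compare() & 0x08 is non-zero.
 */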
/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)	((0xffffffffUL >> (mb)) + \
			 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)	(~0UL >> (mb))
#define MASK64_R(me)	((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)	(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)	(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)	(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
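/*
 * Worked example (illustrative): MASK32(16, 23) is
 * (0xffffffff >> 16) + (-0x80000000 >> 23) + 1 = 0xffff - 0x100 + 1
 * = 0xff00, i.e. ones in IBM bit positions 16-23.  ROTATE(x, 0) must
 * be special-cased because shifting by the full word width is
 * undefined behaviour in C.
 */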
/*
 * Decode an instruction, and execute it if that can be done just by
 * modifying *regs (i.e. integer arithmetic and logical instructions,
 * branches, and barrier instructions).
 * Returns 1 if the instruction has been executed, or 0 if not.
 * Sets *op to indicate what the instruction does.
 */
int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
			    unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	long ival;

	op->type = COMPUTE;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		op->type = BRANCH;
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = truncate_if_32bit(regs->msr, imm);
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		if ((instr & 0xfe2) == 2)
			op->type = SYSCALL;
		else
			op->type = UNKNOWN;
		return 0;
#endif
	case 18:	/* b */
		op->type = BRANCH;
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			rd = (instr >> 21) & 0x1c;
			ra = (instr >> 16) & 0x1c;
			val = (regs->ccr >> ra) & 0xf;
			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			goto instr_done;

		case 16:	/* bclr */
		case 528:	/* bcctr */
			op->type = BRANCH;
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;

		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = RFI;
			return 0;

		case 150:	/* isync */
			op->type = BARRIER;
			isync();
			goto instr_done;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
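		/*
		 * Note (illustrative): the CR-logical cases above work
		 * because bits 6-9 of each op's extended opcode hold its
		 * truth table; indexing that table with the two source CR
		 * bits (ra * 2 + rb) yields the result bit.  E.g. for
		 * crand (XO 257 = 0b100000001) only the ra = rb = 1 entry
		 * of the table is set.
		 */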
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER;
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			op->type = BARRIER;
			eieio();
			goto instr_done;
		}
		break;
	}
	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;

	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	switch (opcode) {
#ifdef __powerpc64__
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;

	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;

#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			goto instr_done;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			goto instr_done;
#endif
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif

		case 19:	/* mfcr */
			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;

		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;

		case 339:	/* mfspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case SPRN_LR:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case SPRN_CTR:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			default:
				op->type = MFSPR;
				op->reg = rd;
				op->spr = spr;
				return 0;
			}
			break;

		case 467:	/* mtspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case SPRN_LR:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case SPRN_CTR:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			default:
				op->type = MTSPR;
				op->val = regs->gpr[rd];
				op->spr = spr;
				return 0;
			}
			break;
/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;
/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;
/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif
/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */
/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(instr, regs);
			return 0;
		}
		break;
	}
/*
 * Loads and stores.
 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
	u = (instr >> 20) & UPDATE;
	switch (opcode) {
	case 31:
		u = instr & UPDATE;
		op->ea = xform_ea(instr, regs);
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;

#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(LOAD_VMX, 0, 16);
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(STORE_VMX, 0, 16);
			break;
#endif /* CONFIG_ALTIVEC */

#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;

#endif
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 4);
			break;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 8);
			break;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 4);
			break;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 8);
			break;
#endif

#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);
			break;

#endif
		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
			break;

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);
			break;

		case 725:	/* stswi */
			if (rb == 0)
				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);
			break;

		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);
			break;

#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, u, 16);
			break;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, u, 16);
			break;
#endif /* CONFIG_VSX */
		}
		break;
	case 32:	/* lwz */
	case 33:	/* lwzu */
		op->type = MKOP(LOAD, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		op->type = MKOP(LOAD, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 36:	/* stw */
	case 37:	/* stwu */
		op->type = MKOP(STORE, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 38:	/* stb */
	case 39:	/* stbu */
		op->type = MKOP(STORE, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		op->type = MKOP(LOAD, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 42:	/* lha */
	case 43:	/* lhau */
		op->type = MKOP(LOAD, SIGNEXT | u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 44:	/* sth */
	case 45:	/* sthu */
		op->type = MKOP(STORE, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 46:	/* lmw */
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

	case 47:	/* stmw */
		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;
#endif
#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* ld */
			op->type = MKOP(LOAD, 0, 8);
			break;
		case 1:		/* ldu */
			op->type = MKOP(LOAD, UPDATE, 8);
			break;
		case 2:		/* lwa */
			op->type = MKOP(LOAD, SIGNEXT, 4);
			break;
		}
		break;

	case 62:	/* std[u] */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* std */
			op->type = MKOP(STORE, 0, 8);
			break;
		case 1:		/* stdu */
			op->type = MKOP(STORE, UPDATE, 8);
			break;
		}
		break;
#endif /* __powerpc64__ */
	}

	return 0;
 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;

 priv:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGPRIV;
	return 0;

 trap:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGTRAP;
	return 0;

#ifdef CONFIG_PPC_FPU
 fpunavail:
	op->type = INTERRUPT | 0x800;
	return 0;
#endif

#ifdef CONFIG_ALTIVEC
 vecunavail:
	op->type = INTERRUPT | 0xf20;
	return 0;
#endif

#ifdef CONFIG_VSX
 vsxunavail:
	op->type = INTERRUPT | 0xf40;
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(analyse_instr);
/*
 * For PPC32 we always use stwu with r1 to change the stack pointer,
 * so this emulated store may corrupt the exception frame.  We now
 * have to provide the exception frame trampoline, which is pushed
 * below the kprobed function's stack, so we only update gpr[1] and
 * don't emulate the real store operation.  The real store is done
 * safely in the exception return code, which checks this flag.
 */
static __kprobes int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	/*
	 * Check whether we would overflow the kernel stack.
	 */
	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
		return -EINVAL;
	}
#endif /* CONFIG_PPC32 */
	/*
	 * Check whether the flag is already set, since that would mean
	 * we'd lose the previous value.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}
static __kprobes void do_signext(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = (signed short) *valp;
		break;
	case 4:
		*valp = (signed int) *valp;
		break;
	}
}
static __kprobes void do_byterev(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = byterev_2(*valp);
		break;
	case 4:
		*valp = byterev_4(*valp);
		break;
#ifdef __powerpc64__
	case 8:
		*valp = byterev_8(*valp);
		break;
#endif
	}
}
/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;
	int r, err, size;
	unsigned long val;
	unsigned int cr;
	int i, rd, nb;

	r = analyse_instr(&op, regs, instr);
	if (r != 0)
		return r;

	err = 0;
	size = GETSIZE(op.type);
	switch (op.type & INSTR_TYPE_MASK) {
	case CACHEOP:
		if (!address_ok(regs, op.ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(op.ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(op.ea, err, "dcbf");
			break;
		case DCBTST:
			if (op.reg == 0)
				prefetchw((void *) op.ea);
			break;
		case DCBT:
			if (op.reg == 0)
				prefetch((void *) op.ea);
			break;
		case ICBI:
			__cacheop_user_asmx(op.ea, err, "icbi");
			break;
		}
		if (err)
			return 0;
		goto instr_done;
	case LARX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		err = -EFAULT;
		if (!address_ok(regs, op.ea, size))
			goto ldst_done;
		err = 0;
		switch (size) {
		case 4:
			__get_user_asmx(val, op.ea, err, "lwarx");
			break;
		case 8:
			__get_user_asmx(val, op.ea, err, "ldarx");
			break;
		default:
			return 0;
		}
		if (!err)
			regs->gpr[op.reg] = val;
		goto ldst_done;
	case STCX:
		if (regs->msr & MSR_LE)
			return 0;
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		err = -EFAULT;
		if (!address_ok(regs, op.ea, size))
			goto ldst_done;
		err = 0;
		switch (size) {
		case 4:
			__put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
			break;
		case 8:
			__put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
			break;
		default:
			return 0;
		}
		if (!err)
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
		goto ldst_done;
	case LOAD:
		if (regs->msr & MSR_LE)
			return 0;
		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
		if (!err) {
			if (op.type & SIGNEXT)
				do_signext(&regs->gpr[op.reg], size);
			if (op.type & BYTEREV)
				do_byterev(&regs->gpr[op.reg], size);
		}
		goto ldst_done;
#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
		else
			err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case LOAD_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
			if (err)
				return 0;
			if (nb < 4)	/* left-justify last bytes */
				regs->gpr[rd] <<= 32 - 8 * nb;
			op.ea += 4;
			++rd;
		}
		goto instr_done;
	case STORE:
		if (regs->msr & MSR_LE)
			return 0;
		if ((op.type & UPDATE) && size == sizeof(long) &&
		    op.reg == 1 && op.update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
		    op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(op.ea, regs);
			goto ldst_done;
		}
		err = write_mem(op.val, op.ea, size, regs);
		goto ldst_done;
#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		if (regs->msr & MSR_LE)
			return 0;
		if (size == 4)
			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
		else
			err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX:
		if (regs->msr & MSR_LE)
			return 0;
		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case STORE_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			val = regs->gpr[rd];
			nb = size - i;
			if (nb > 4)
				nb = 4;
			else
				val >>= 32 - 8 * nb;
			err = write_mem(val, op.ea, nb, regs);
			if (err)
				return 0;
			op.ea += 4;
			++rd;
		}
		goto instr_done;
	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);
		goto instr_done;
#ifdef CONFIG_PPC64
	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;

	case RFI:
		return -1;
#endif
	}
	return 0;
 ldst_done:
	if (err)
		return 0;
	if (op.type & UPDATE)
		regs->gpr[op.update_reg] = op.ea;

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;
}