Linux 4.13.16
arch/powerpc/lib/sstep.c
/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
extern char system_call_common[];

#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffffUL
#else
#define MSR_MASK	0x87c0ffff
#endif

/* Bits in XER */
#define XER_SO		0x80000000U
#define XER_OV		0x40000000U
#define XER_CA		0x20000000U
#ifdef CONFIG_PPC_FPU
/*
 * Functions in ldstfp.S
 */
extern int do_lfs(int rn, unsigned long ea);
extern int do_lfd(int rn, unsigned long ea);
extern int do_stfs(int rn, unsigned long ea);
extern int do_stfd(int rn, unsigned long ea);
extern int do_lvx(int rn, unsigned long ea);
extern int do_stvx(int rn, unsigned long ea);
extern int do_lxvd2x(int rn, unsigned long ea);
extern int do_stxvd2x(int rn, unsigned long ea);
#endif
/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
						       unsigned long val)
{
#ifdef __powerpc64__
	if ((msr & MSR_64BIT) == 0)
		val &= 0xffffffffUL;
#endif
	return val;
}
/*
 * Determine whether a conditional branch instruction would branch.
 */
static nokprobe_inline int branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
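
/*
 * Worked example (illustrative, not from the original source): for
 * "bne cr0,target", BO = 0b00100 and BI = 2 (the EQ bit of CR0).
 * bo & 4 is set, so CTR is left alone; bo & 0x10 is clear, so the CR
 * bit is tested: (regs->ccr >> 29) & 1 extracts CR0[EQ], and the
 * branch is taken only when that bit differs from (bo >> 3) & 1 == 0,
 * i.e. exactly when EQ is clear.
 */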
static nokprobe_inline long address_ok(struct pt_regs *regs, unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	return __access_ok(ea, nb, USER_DS);
}
/*
 * Calculate effective address for a D-form instruction
 */
static nokprobe_inline unsigned long dform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) instr;		/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction
 */
static nokprobe_inline unsigned long dsform_ea(unsigned int instr, struct pt_regs *regs)
{
	int ra;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	ea = (signed short) (instr & ~3);	/* sign-extend */
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
#endif /* __powerpc64__ */
/*
 * Calculate effective address for an X-form instruction
 */
static nokprobe_inline unsigned long xform_ea(unsigned int instr,
					      struct pt_regs *regs)
{
	int ra, rb;
	unsigned long ea;

	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	ea = regs->gpr[rb];
	if (ra)
		ea += regs->gpr[ra];

	return truncate_if_32bit(regs->msr, ea);
}
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static nokprobe_inline unsigned long max_align(unsigned long x)
{
	x |= sizeof(unsigned long);
	return x & -x;		/* isolates rightmost bit */
}
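
/*
 * Worked examples (illustrative): on a 64-bit kernel,
 * max_align(0x1003) == 1, max_align(0x1002) == 2 and
 * max_align(0x1000) == 8; the result is capped at
 * sizeof(unsigned long) because that bit is OR'ed in before
 * x & -x isolates the lowest set bit.
 */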
static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
	return ((x >> 8) & 0xff) | ((x & 0xff) << 8);
}

static nokprobe_inline unsigned long byterev_4(unsigned long x)
{
	return ((x >> 24) & 0xff) | ((x >> 8) & 0xff00) |
		((x & 0xff00) << 8) | ((x & 0xff) << 24);
}

#ifdef __powerpc64__
static nokprobe_inline unsigned long byterev_8(unsigned long x)
{
	return (byterev_4(x) << 32) | byterev_4(x >> 32);
}
#endif
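
/*
 * Illustrative example: byterev_4(0x12345678) == 0x78563412, i.e. the
 * value as it would appear when loaded with the opposite endianness.
 * byterev_8() composes two 4-byte reversals and swaps the two halves.
 */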
static nokprobe_inline int read_mem_aligned(unsigned long *dest,
					    unsigned long ea, int nb)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	return err;
}
static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
					      unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;
	unsigned long x, b, c;
#ifdef __LITTLE_ENDIAN__
	int len = nb; /* save a copy of the length for byte reversal */
#endif

	/* unaligned, do this in pieces */
	x = 0;
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = read_mem_aligned(&b, ea, c);
		if (err)
			return err;
		x = (x << (8 * c)) + b;
		ea += c;
	}
#ifdef __LITTLE_ENDIAN__
	switch (len) {
	case 2:
		*dest = byterev_2(x);
		break;
	case 4:
		*dest = byterev_4(x);
		break;
#ifdef __powerpc64__
	case 8:
		*dest = byterev_8(x);
		break;
#endif
	}
#endif
#ifdef __BIG_ENDIAN__
	*dest = x;
#endif
	return 0;
}
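
/*
 * Illustrative walk-through (not from the original source): on a
 * big-endian kernel, a 4-byte read at ea == 0x1002 is split by
 * max_align() into two naturally aligned 2-byte reads (at 0x1002 and
 * 0x1004), accumulated most-significant-first into x.  On little-endian
 * the loop always reads one byte at a time and the final byterev_*()
 * call restores the correct value.
 */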
/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int read_mem(unsigned long *dest, unsigned long ea, int nb,
		    struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return read_mem_aligned(dest, ea, nb);
	return read_mem_unaligned(dest, ea, nb, regs);
}
NOKPROBE_SYMBOL(read_mem);
static nokprobe_inline int write_mem_aligned(unsigned long val,
					     unsigned long ea, int nb)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	return err;
}
static nokprobe_inline int write_mem_unaligned(unsigned long val,
					       unsigned long ea, int nb, struct pt_regs *regs)
{
	int err;
	unsigned long c;

#ifdef __LITTLE_ENDIAN__
	switch (nb) {
	case 2:
		val = byterev_2(val);
		break;
	case 4:
		val = byterev_4(val);
		break;
#ifdef __powerpc64__
	case 8:
		val = byterev_8(val);
		break;
#endif
	}
#endif
	/* unaligned or little-endian, do this in pieces */
	for (; nb > 0; nb -= c) {
#ifdef __LITTLE_ENDIAN__
		c = 1;
#endif
#ifdef __BIG_ENDIAN__
		c = max_align(ea);
#endif
		if (c > nb)
			c = max_align(nb);
		err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
		if (err)
			return err;
		ea += c;
	}
	return 0;
}
/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred.
 */
static int write_mem(unsigned long val, unsigned long ea, int nb,
		     struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & (nb - 1)) == 0)
		return write_mem_aligned(val, ea, nb);
	return write_mem_unaligned(val, ea, nb, regs);
}
NOKPROBE_SYMBOL(write_mem);
#ifdef CONFIG_PPC_FPU
/*
 * Check the address and alignment, and call func to do the actual
 * load or store.
 */
static int do_fp_load(int rn, int (*func)(int, unsigned long),
		      unsigned long ea, int nb,
		      struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul;
	if (sizeof(unsigned long) == 8 || nb == 4) {
		err = read_mem_unaligned(&data.ul[0], ea, nb, regs);
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
	} else {
		/* reading a double on 32-bit */
		err = read_mem_unaligned(&data.ul[0], ea, 4, regs);
		if (!err)
			err = read_mem_unaligned(&data.ul[1], ea + 4, 4, regs);
	}
	if (err)
		return err;
	return (*func)(rn, ptr);
}
NOKPROBE_SYMBOL(do_fp_load);
static int do_fp_store(int rn, int (*func)(int, unsigned long),
		       unsigned long ea, int nb,
		       struct pt_regs *regs)
{
	int err;
	union {
		double dbl;
		unsigned long ul[2];
		struct {
#ifdef __BIG_ENDIAN__
			unsigned _pad_;
			unsigned word;
#endif
#ifdef __LITTLE_ENDIAN__
			unsigned word;
			unsigned _pad_;
#endif
		} single;
	} data;
	unsigned long ptr;

	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	ptr = (unsigned long) &data.ul[0];
	if (sizeof(unsigned long) == 8 || nb == 4) {
		if (nb == 4)
			ptr = (unsigned long)&(data.single.word);
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, nb, regs);
	} else {
		/* writing a double on 32-bit */
		err = (*func)(rn, ptr);
		if (err)
			return err;
		err = write_mem_unaligned(data.ul[0], ea, 4, regs);
		if (!err)
			err = write_mem_unaligned(data.ul[1], ea + 4, 4, regs);
	}
	return err;
}
NOKPROBE_SYMBOL(do_fp_store);
#endif
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static nokprobe_inline int do_vec_load(int rn, int (*func)(int, unsigned long),
				       unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}

static nokprobe_inline int do_vec_store(int rn, int (*func)(int, unsigned long),
					unsigned long ea, struct pt_regs *regs)
{
	if (!address_ok(regs, ea & ~0xfUL, 16))
		return -EFAULT;
	return (*func)(rn, ea);
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
static nokprobe_inline int do_vsx_load(int rn, int (*func)(int, unsigned long),
				       unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = read_mem_unaligned(&val[0], ea, 8, regs);
	if (!err)
		err = read_mem_unaligned(&val[1], ea + 8, 8, regs);
	if (!err)
		err = (*func)(rn, (unsigned long) &val[0]);
	return err;
}

static nokprobe_inline int do_vsx_store(int rn, int (*func)(int, unsigned long),
					unsigned long ea, struct pt_regs *regs)
{
	int err;
	unsigned long val[2];

	if (!address_ok(regs, ea, 16))
		return -EFAULT;
	if ((ea & 3) == 0)
		return (*func)(rn, ea);
	err = (*func)(rn, (unsigned long) &val[0]);
	if (err)
		return err;
	err = write_mem_unaligned(val[0], ea, 8, regs);
	if (!err)
		err = write_mem_unaligned(val[1], ea + 8, 8, regs);
	return err;
}
#endif /* CONFIG_VSX */
#define __put_user_asmx(x, addr, err, op, cr)		\
	__asm__ __volatile__(				\
		"1:	" op " %2,0,%3\n"		\
		"	mfcr	%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%4\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (cr)			\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err))

#define __get_user_asmx(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0,%2\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "i" (-EFAULT), "0" (err))

#define __cacheop_user_asmx(addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" 0,%1\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li	%0,%3\n"		\
		"	b	2b\n"			\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (addr), "i" (-EFAULT), "0" (err))
static nokprobe_inline void set_cr0(struct pt_regs *regs, int rd)
{
	long val = regs->gpr[rd];

	regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		val = (int) val;
#endif
	if (val < 0)
		regs->ccr |= 0x80000000;
	else if (val > 0)
		regs->ccr |= 0x40000000;
	else
		regs->ccr |= 0x20000000;
}
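
/*
 * Illustrative note: the CR0 field ends up as LT|GT|EQ|SO, i.e.
 * 0x80000000 for a negative result, 0x40000000 for positive and
 * 0x20000000 for zero, with 0x10000000 copied from XER[SO].
 * E.g. rd == -5 under a 64-bit MSR yields CR0 = 0b1000 | SO.
 */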
static nokprobe_inline void add_with_carry(struct pt_regs *regs, int rd,
					   unsigned long val1, unsigned long val2,
					   unsigned long carry_in)
{
	unsigned long val = val1 + val2;

	if (carry_in)
		++val;
	regs->gpr[rd] = val;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT)) {
		val = (unsigned int) val;
		val1 = (unsigned int) val1;
	}
#endif
	if (val < val1 || (carry_in && val == val1))
		regs->xer |= XER_CA;
	else
		regs->xer &= ~XER_CA;
}
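
/*
 * Illustrative examples of the carry computation: with val1 == ~0UL,
 * val2 == 1 and no carry in, val wraps to 0 < val1, so XER[CA] is set.
 * With carry_in == 1 and val2 == ~0UL, val == val1 after the wrap,
 * which the second half of the test also recognizes as a carry out.
 */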
static nokprobe_inline void do_cmp_signed(struct pt_regs *regs, long v1, long v2,
					  int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}

static nokprobe_inline void do_cmp_unsigned(struct pt_regs *regs, unsigned long v1,
					    unsigned long v2, int crfld)
{
	unsigned int crval, shift;

	crval = (regs->xer >> 31) & 1;		/* get SO bit */
	if (v1 < v2)
		crval |= 8;
	else if (v1 > v2)
		crval |= 4;
	else
		crval |= 2;
	shift = (7 - crfld) * 4;
	regs->ccr = (regs->ccr & ~(0xf << shift)) | (crval << shift);
}
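
/*
 * Illustrative example: "cmpd cr3,rA,rB" stores its result nibble at
 * shift (7 - 3) * 4 == 16, so bits 16-19 of regs->ccr become 8 (LT),
 * 4 (GT) or 2 (EQ), with the low bit of the nibble holding the copied
 * XER[SO] bit.
 */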
static nokprobe_inline int trap_compare(long v1, long v2)
{
	int ret = 0;

	if (v1 < v2)
		ret |= 0x10;
	else if (v1 > v2)
		ret |= 0x08;
	else
		ret |= 0x04;
	if ((unsigned long)v1 < (unsigned long)v2)
		ret |= 0x02;
	else if ((unsigned long)v1 > (unsigned long)v2)
		ret |= 0x01;
	return ret;
}
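
/*
 * Illustrative example: the returned bits mirror the TO field of tw/td
 * (0x10 lt, 0x08 gt, 0x04 eq, 0x02 ltu, 0x01 gtu).  For "tdeq rA,rB"
 * the TO field is 0x04, so the caller's "rd & trap_compare(...)" is
 * non-zero exactly when rA == rB.
 */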
/*
 * Elements of 32-bit rotate and mask instructions.
 */
#define MASK32(mb, me)		((0xffffffffUL >> (mb)) + \
				 ((signed long)-0x80000000L >> (me)) + ((me) >= (mb)))
#ifdef __powerpc64__
#define MASK64_L(mb)		(~0UL >> (mb))
#define MASK64_R(me)		((signed long)-0x8000000000000000L >> (me))
#define MASK64(mb, me)		(MASK64_L(mb) + MASK64_R(me) + ((me) >= (mb)))
#define DATA32(x)		(((x) & 0xffffffffUL) | (((x) & 0xffffffffUL) << 32))
#else
#define DATA32(x)		(x)
#endif
#define ROTATE(x, n)	((n) ? (((x) << (n)) | ((x) >> (8 * sizeof(long) - (n)))) : (x))
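
/*
 * Worked example (illustrative): "rlwinm ra,rs,0,24,31" extracts the
 * low byte.  On a 64-bit kernel MASK32(24, 31) evaluates to
 * (0xffffffff >> 24) + (-1) + 1 == 0xff.  DATA32() duplicates the low
 * word into the high word first, so that a 64-bit ROTATE() by n < 32
 * produces the 32-bit rotate result in the low word.
 */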
/*
 * Decode an instruction, and execute it if that can be done just by
 * modifying *regs (i.e. integer arithmetic and logical instructions,
 * branches, and barrier instructions).
 * Returns 1 if the instruction has been executed, or 0 if not.
 * Sets *op to indicate what the instruction does.
 */
int analyse_instr(struct instruction_op *op, struct pt_regs *regs,
		  unsigned int instr)
{
	unsigned int opcode, ra, rb, rd, spr, u;
	unsigned long int imm;
	unsigned long int val, val2;
	unsigned int mb, me, sh;
	long ival;

	op->type = COMPUTE;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		op->type = BRANCH;
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		regs->nip = truncate_if_32bit(regs->msr, regs->nip);
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = truncate_if_32bit(regs->msr, imm);
		return 1;
#ifdef CONFIG_PPC64
	case 17:	/* sc */
		if ((instr & 0xfe2) == 2)
			op->type = SYSCALL;
		else
			op->type = UNKNOWN;
		return 0;
#endif
	case 18:	/* b */
		op->type = BRANCH;
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1)
			regs->link = truncate_if_32bit(regs->msr, regs->nip + 4);
		imm = truncate_if_32bit(regs->msr, imm);
		regs->nip = imm;
		return 1;
	case 19:
		switch ((instr >> 1) & 0x3ff) {
		case 0:		/* mcrf */
			rd = 7 - ((instr >> 23) & 0x7);
			ra = 7 - ((instr >> 18) & 0x7);
			rd *= 4;
			ra *= 4;
			val = (regs->ccr >> ra) & 0xf;
			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
			goto instr_done;

		case 16:	/* bclr */
		case 528:	/* bcctr */
			op->type = BRANCH;
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
			imm = truncate_if_32bit(regs->msr, imm);
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;

		case 18:	/* rfid, scary */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = RFI;
			return 0;

		case 150:	/* isync */
			op->type = BARRIER;
			isync();
			goto instr_done;

		case 33:	/* crnor */
		case 129:	/* crandc */
		case 193:	/* crxor */
		case 225:	/* crnand */
		case 257:	/* crand */
		case 289:	/* creqv */
		case 417:	/* crorc */
		case 449:	/* cror */
			ra = (instr >> 16) & 0x1f;
			rb = (instr >> 11) & 0x1f;
			rd = (instr >> 21) & 0x1f;
			ra = (regs->ccr >> (31 - ra)) & 1;
			rb = (regs->ccr >> (31 - rb)) & 1;
			val = (instr >> (6 + ra * 2 + rb)) & 1;
			regs->ccr = (regs->ccr & ~(1UL << (31 - rd))) |
				(val << (31 - rd));
			goto instr_done;
		}
		break;
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 598:	/* sync */
			op->type = BARRIER;
#ifdef __powerpc64__
			switch ((instr >> 21) & 3) {
			case 1:		/* lwsync */
				asm volatile("lwsync" : : : "memory");
				goto instr_done;
			case 2:		/* ptesync */
				asm volatile("ptesync" : : : "memory");
				goto instr_done;
			}
#endif
			mb();
			goto instr_done;

		case 854:	/* eieio */
			op->type = BARRIER;
			eieio();
			goto instr_done;
		}
		break;
	}

	/* Following cases refer to regs->gpr[], so we need all regs */
	if (!FULL_REGS(regs))
		return 0;
	rd = (instr >> 21) & 0x1f;
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;

	switch (opcode) {
#ifdef __powerpc64__
	case 2:		/* tdi */
		if (rd & trap_compare(regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;
#endif
	case 3:		/* twi */
		if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
			goto trap;
		goto instr_done;
	case 7:		/* mulli */
		regs->gpr[rd] = regs->gpr[ra] * (short) instr;
		goto instr_done;

	case 8:		/* subfic */
		imm = (short) instr;
		add_with_carry(regs, rd, ~regs->gpr[ra], imm, 1);
		goto instr_done;

	case 10:	/* cmpli */
		imm = (unsigned short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (unsigned int) val;
#endif
		do_cmp_unsigned(regs, val, imm, rd >> 2);
		goto instr_done;

	case 11:	/* cmpi */
		imm = (short) instr;
		val = regs->gpr[ra];
#ifdef __powerpc64__
		if ((rd & 1) == 0)
			val = (int) val;
#endif
		do_cmp_signed(regs, val, imm, rd >> 2);
		goto instr_done;

	case 12:	/* addic */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		goto instr_done;

	case 13:	/* addic. */
		imm = (short) instr;
		add_with_carry(regs, rd, regs->gpr[ra], imm, 0);
		set_cr0(regs, rd);
		goto instr_done;

	case 14:	/* addi */
		imm = (short) instr;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;

	case 15:	/* addis */
		imm = ((short) instr) << 16;
		if (ra)
			imm += regs->gpr[ra];
		regs->gpr[rd] = imm;
		goto instr_done;
	case 20:	/* rlwimi */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		imm = MASK32(mb, me);
		regs->gpr[ra] = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
		goto logical_done;

	case 21:	/* rlwinm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;

	case 23:	/* rlwnm */
		mb = (instr >> 6) & 0x1f;
		me = (instr >> 1) & 0x1f;
		rb = regs->gpr[rb] & 0x1f;
		val = DATA32(regs->gpr[rd]);
		regs->gpr[ra] = ROTATE(val, rb) & MASK32(mb, me);
		goto logical_done;
	case 24:	/* ori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | imm;
		goto instr_done;

	case 25:	/* oris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] | (imm << 16);
		goto instr_done;

	case 26:	/* xori */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ imm;
		goto instr_done;

	case 27:	/* xoris */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] ^ (imm << 16);
		goto instr_done;

	case 28:	/* andi. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & imm;
		set_cr0(regs, ra);
		goto instr_done;

	case 29:	/* andis. */
		imm = (unsigned short) instr;
		regs->gpr[ra] = regs->gpr[rd] & (imm << 16);
		set_cr0(regs, ra);
		goto instr_done;
#ifdef __powerpc64__
	case 30:	/* rld* */
		mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
		val = regs->gpr[rd];
		if ((instr & 0x10) == 0) {
			sh = rb | ((instr & 2) << 4);
			val = ROTATE(val, sh);
			switch ((instr >> 2) & 3) {
			case 0:		/* rldicl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldicr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			case 2:		/* rldic */
				regs->gpr[ra] = val & MASK64(mb, 63 - sh);
				goto logical_done;
			case 3:		/* rldimi */
				imm = MASK64(mb, 63 - sh);
				regs->gpr[ra] = (regs->gpr[ra] & ~imm) |
					(val & imm);
				goto logical_done;
			}
		} else {
			sh = regs->gpr[rb] & 0x3f;
			val = ROTATE(val, sh);
			switch ((instr >> 1) & 7) {
			case 0:		/* rldcl */
				regs->gpr[ra] = val & MASK64_L(mb);
				goto logical_done;
			case 1:		/* rldcr */
				regs->gpr[ra] = val & MASK64_R(mb);
				goto logical_done;
			}
		}
#endif
		break;	/* illegal instruction */
	case 31:
		switch ((instr >> 1) & 0x3ff) {
		case 4:		/* tw */
			if (rd == 0x1f ||
			    (rd & trap_compare((int)regs->gpr[ra],
					       (int)regs->gpr[rb])))
				goto trap;
			goto instr_done;
#ifdef __powerpc64__
		case 68:	/* td */
			if (rd & trap_compare(regs->gpr[ra], regs->gpr[rb]))
				goto trap;
			goto instr_done;
#endif
		case 83:	/* mfmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MFMSR;
			op->reg = rd;
			return 0;
		case 146:	/* mtmsr */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			op->val = 0xffffffff & ~(MSR_ME | MSR_LE);
			return 0;
#ifdef CONFIG_PPC64
		case 178:	/* mtmsrd */
			if (regs->msr & MSR_PR)
				goto priv;
			op->type = MTMSR;
			op->reg = rd;
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
			op->val = imm;
			return 0;
#endif
		case 19:	/* mfcr */
			if ((instr >> 20) & 1) {
				imm = 0xf0000000UL;
				for (sh = 0; sh < 8; ++sh) {
					if (instr & (0x80000 >> sh)) {
						regs->gpr[rd] = regs->ccr & imm;
						break;
					}
					imm >>= 4;
				}

				goto instr_done;
			}

			regs->gpr[rd] = regs->ccr;
			regs->gpr[rd] &= 0xffffffffUL;
			goto instr_done;

		case 144:	/* mtcrf */
			imm = 0xf0000000UL;
			val = regs->gpr[rd];
			for (sh = 0; sh < 8; ++sh) {
				if (instr & (0x80000 >> sh))
					regs->ccr = (regs->ccr & ~imm) |
						(val & imm);
				imm >>= 4;
			}
			goto instr_done;
		case 339:	/* mfspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mfxer */
				regs->gpr[rd] = regs->xer;
				regs->gpr[rd] &= 0xffffffffUL;
				goto instr_done;
			case SPRN_LR:	/* mflr */
				regs->gpr[rd] = regs->link;
				goto instr_done;
			case SPRN_CTR:	/* mfctr */
				regs->gpr[rd] = regs->ctr;
				goto instr_done;
			default:
				op->type = MFSPR;
				op->reg = rd;
				op->spr = spr;
				return 0;
			}
			break;

		case 467:	/* mtspr */
			spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
			switch (spr) {
			case SPRN_XER:	/* mtxer */
				regs->xer = (regs->gpr[rd] & 0xffffffffUL);
				goto instr_done;
			case SPRN_LR:	/* mtlr */
				regs->link = regs->gpr[rd];
				goto instr_done;
			case SPRN_CTR:	/* mtctr */
				regs->ctr = regs->gpr[rd];
				goto instr_done;
			default:
				op->type = MTSPR;
				op->val = regs->gpr[rd];
				op->spr = spr;
				return 0;
			}
			break;
/*
 * Compare instructions
 */
		case 0:	/* cmp */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (int) val;
				val2 = (int) val2;
			}
#endif
			do_cmp_signed(regs, val, val2, rd >> 2);
			goto instr_done;

		case 32:	/* cmpl */
			val = regs->gpr[ra];
			val2 = regs->gpr[rb];
#ifdef __powerpc64__
			if ((rd & 1) == 0) {
				/* word (32-bit) compare */
				val = (unsigned int) val;
				val2 = (unsigned int) val2;
			}
#endif
			do_cmp_unsigned(regs, val, val2, rd >> 2);
			goto instr_done;
/*
 * Arithmetic instructions
 */
		case 8:	/* subfc */
			add_with_carry(regs, rd, ~regs->gpr[ra],
				       regs->gpr[rb], 1);
			goto arith_done;
#ifdef __powerpc64__
		case 9:	/* mulhdu */
			asm("mulhdu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 10:	/* addc */
			add_with_carry(regs, rd, regs->gpr[ra],
				       regs->gpr[rb], 0);
			goto arith_done;

		case 11:	/* mulhwu */
			asm("mulhwu %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 40:	/* subf */
			regs->gpr[rd] = regs->gpr[rb] - regs->gpr[ra];
			goto arith_done;
#ifdef __powerpc64__
		case 73:	/* mulhd */
			asm("mulhd %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;
#endif
		case 75:	/* mulhw */
			asm("mulhw %0,%1,%2" : "=r" (regs->gpr[rd]) :
			    "r" (regs->gpr[ra]), "r" (regs->gpr[rb]));
			goto arith_done;

		case 104:	/* neg */
			regs->gpr[rd] = -regs->gpr[ra];
			goto arith_done;

		case 136:	/* subfe */
			add_with_carry(regs, rd, ~regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 138:	/* adde */
			add_with_carry(regs, rd, regs->gpr[ra], regs->gpr[rb],
				       regs->xer & XER_CA);
			goto arith_done;

		case 200:	/* subfze */
			add_with_carry(regs, rd, ~regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 202:	/* addze */
			add_with_carry(regs, rd, regs->gpr[ra], 0L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 232:	/* subfme */
			add_with_carry(regs, rd, ~regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;
#ifdef __powerpc64__
		case 233:	/* mulld */
			regs->gpr[rd] = regs->gpr[ra] * regs->gpr[rb];
			goto arith_done;
#endif
		case 234:	/* addme */
			add_with_carry(regs, rd, regs->gpr[ra], -1L,
				       regs->xer & XER_CA);
			goto arith_done;

		case 235:	/* mullw */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] *
				(unsigned int) regs->gpr[rb];
			goto arith_done;

		case 266:	/* add */
			regs->gpr[rd] = regs->gpr[ra] + regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 457:	/* divdu */
			regs->gpr[rd] = regs->gpr[ra] / regs->gpr[rb];
			goto arith_done;
#endif
		case 459:	/* divwu */
			regs->gpr[rd] = (unsigned int) regs->gpr[ra] /
				(unsigned int) regs->gpr[rb];
			goto arith_done;
#ifdef __powerpc64__
		case 489:	/* divd */
			regs->gpr[rd] = (long int) regs->gpr[ra] /
				(long int) regs->gpr[rb];
			goto arith_done;
#endif
		case 491:	/* divw */
			regs->gpr[rd] = (int) regs->gpr[ra] /
				(int) regs->gpr[rb];
			goto arith_done;
/*
 * Logical instructions
 */
		case 26:	/* cntlzw */
			asm("cntlzw %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#ifdef __powerpc64__
		case 58:	/* cntlzd */
			asm("cntlzd %0,%1" : "=r" (regs->gpr[ra]) :
			    "r" (regs->gpr[rd]));
			goto logical_done;
#endif
		case 28:	/* and */
			regs->gpr[ra] = regs->gpr[rd] & regs->gpr[rb];
			goto logical_done;

		case 60:	/* andc */
			regs->gpr[ra] = regs->gpr[rd] & ~regs->gpr[rb];
			goto logical_done;

		case 124:	/* nor */
			regs->gpr[ra] = ~(regs->gpr[rd] | regs->gpr[rb]);
			goto logical_done;

		case 284:	/* eqv */
			regs->gpr[ra] = ~(regs->gpr[rd] ^ regs->gpr[rb]);
			goto logical_done;

		case 316:	/* xor */
			regs->gpr[ra] = regs->gpr[rd] ^ regs->gpr[rb];
			goto logical_done;

		case 412:	/* orc */
			regs->gpr[ra] = regs->gpr[rd] | ~regs->gpr[rb];
			goto logical_done;

		case 444:	/* or */
			regs->gpr[ra] = regs->gpr[rd] | regs->gpr[rb];
			goto logical_done;

		case 476:	/* nand */
			regs->gpr[ra] = ~(regs->gpr[rd] & regs->gpr[rb]);
			goto logical_done;

		case 922:	/* extsh */
			regs->gpr[ra] = (signed short) regs->gpr[rd];
			goto logical_done;

		case 954:	/* extsb */
			regs->gpr[ra] = (signed char) regs->gpr[rd];
			goto logical_done;
#ifdef __powerpc64__
		case 986:	/* extsw */
			regs->gpr[ra] = (signed int) regs->gpr[rd];
			goto logical_done;
#endif
/*
 * Shift instructions
 */
		case 24:	/* slw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] << sh) & 0xffffffffUL;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 536:	/* srw */
			sh = regs->gpr[rb] & 0x3f;
			if (sh < 32)
				regs->gpr[ra] = (regs->gpr[rd] & 0xffffffffUL) >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 792:	/* sraw */
			sh = regs->gpr[rb] & 0x3f;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
			if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 824:	/* srawi */
			sh = rb;
			ival = (signed int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#ifdef __powerpc64__
		case 27:	/* sld */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] << sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 539:	/* srd */
			sh = regs->gpr[rb] & 0x7f;
			if (sh < 64)
				regs->gpr[ra] = regs->gpr[rd] >> sh;
			else
				regs->gpr[ra] = 0;
			goto logical_done;

		case 794:	/* srad */
			sh = regs->gpr[rb] & 0x7f;
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
			if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;

		case 826:	/* sradi with sh_5 = 0 */
		case 827:	/* sradi with sh_5 = 1 */
			sh = rb | ((instr & 2) << 4);
			ival = (signed long int) regs->gpr[rd];
			regs->gpr[ra] = ival >> sh;
			if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
				regs->xer |= XER_CA;
			else
				regs->xer &= ~XER_CA;
			goto logical_done;
#endif /* __powerpc64__ */
/*
 * Cache instructions
 */
		case 54:	/* dcbst */
			op->type = MKOP(CACHEOP, DCBST, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 86:	/* dcbf */
			op->type = MKOP(CACHEOP, DCBF, 0);
			op->ea = xform_ea(instr, regs);
			return 0;

		case 246:	/* dcbtst */
			op->type = MKOP(CACHEOP, DCBTST, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 278:	/* dcbt */
			op->type = MKOP(CACHEOP, DCBT, 0);
			op->ea = xform_ea(instr, regs);
			op->reg = rd;
			return 0;

		case 982:	/* icbi */
			op->type = MKOP(CACHEOP, ICBI, 0);
			op->ea = xform_ea(instr, regs);
			return 0;
		}
		break;
	}
/*
 * Loads and stores.
 */
	op->type = UNKNOWN;
	op->update_reg = ra;
	op->reg = rd;
	op->val = regs->gpr[rd];
	u = (instr >> 20) & UPDATE;

	switch (opcode) {
	case 31:
		u = instr & UPDATE;
		op->ea = xform_ea(instr, regs);
		switch ((instr >> 1) & 0x3ff) {
		case 20:	/* lwarx */
			op->type = MKOP(LARX, 0, 4);
			break;

		case 150:	/* stwcx. */
			op->type = MKOP(STCX, 0, 4);
			break;

#ifdef __powerpc64__
		case 84:	/* ldarx */
			op->type = MKOP(LARX, 0, 8);
			break;

		case 214:	/* stdcx. */
			op->type = MKOP(STCX, 0, 8);
			break;

		case 21:	/* ldx */
		case 53:	/* ldux */
			op->type = MKOP(LOAD, u, 8);
			break;
#endif

		case 23:	/* lwzx */
		case 55:	/* lwzux */
			op->type = MKOP(LOAD, u, 4);
			break;

		case 87:	/* lbzx */
		case 119:	/* lbzux */
			op->type = MKOP(LOAD, u, 1);
			break;
#ifdef CONFIG_ALTIVEC
		case 103:	/* lvx */
		case 359:	/* lvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(LOAD_VMX, 0, 16);
			break;

		case 231:	/* stvx */
		case 487:	/* stvxl */
			if (!(regs->msr & MSR_VEC))
				goto vecunavail;
			op->type = MKOP(STORE_VMX, 0, 16);
			break;
#endif /* CONFIG_ALTIVEC */
#ifdef __powerpc64__
		case 149:	/* stdx */
		case 181:	/* stdux */
			op->type = MKOP(STORE, u, 8);
			break;
#endif

		case 151:	/* stwx */
		case 183:	/* stwux */
			op->type = MKOP(STORE, u, 4);
			break;

		case 215:	/* stbx */
		case 247:	/* stbux */
			op->type = MKOP(STORE, u, 1);
			break;

		case 279:	/* lhzx */
		case 311:	/* lhzux */
			op->type = MKOP(LOAD, u, 2);
			break;

#ifdef __powerpc64__
		case 341:	/* lwax */
		case 373:	/* lwaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 4);
			break;
#endif

		case 343:	/* lhax */
		case 375:	/* lhaux */
			op->type = MKOP(LOAD, SIGNEXT | u, 2);
			break;

		case 407:	/* sthx */
		case 439:	/* sthux */
			op->type = MKOP(STORE, u, 2);
			break;

#ifdef __powerpc64__
		case 532:	/* ldbrx */
			op->type = MKOP(LOAD, BYTEREV, 8);
			break;

#endif
		case 533:	/* lswx */
			op->type = MKOP(LOAD_MULTI, 0, regs->xer & 0x7f);
			break;

		case 534:	/* lwbrx */
			op->type = MKOP(LOAD, BYTEREV, 4);
			break;

		case 597:	/* lswi */
			if (rb == 0)
				rb = 32;	/* # bytes to load */
			op->type = MKOP(LOAD_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;
#ifdef CONFIG_PPC_FPU
		case 535:	/* lfsx */
		case 567:	/* lfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 4);
			break;

		case 599:	/* lfdx */
		case 631:	/* lfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(LOAD_FP, u, 8);
			break;

		case 663:	/* stfsx */
		case 695:	/* stfsux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 4);
			break;

		case 727:	/* stfdx */
		case 759:	/* stfdux */
			if (!(regs->msr & MSR_FP))
				goto fpunavail;
			op->type = MKOP(STORE_FP, u, 8);
			break;
#endif
#ifdef __powerpc64__
		case 660:	/* stdbrx */
			op->type = MKOP(STORE, BYTEREV, 8);
			op->val = byterev_8(regs->gpr[rd]);
			break;

#endif
		case 661:	/* stswx */
			op->type = MKOP(STORE_MULTI, 0, regs->xer & 0x7f);
			break;

		case 662:	/* stwbrx */
			op->type = MKOP(STORE, BYTEREV, 4);
			op->val = byterev_4(regs->gpr[rd]);
			break;

		case 725:	/* stswi */
			if (rb == 0)
				rb = 32;	/* # bytes to store */
			op->type = MKOP(STORE_MULTI, 0, rb);
			op->ea = 0;
			if (ra)
				op->ea = truncate_if_32bit(regs->msr,
							   regs->gpr[ra]);
			break;

		case 790:	/* lhbrx */
			op->type = MKOP(LOAD, BYTEREV, 2);
			break;

		case 918:	/* sthbrx */
			op->type = MKOP(STORE, BYTEREV, 2);
			op->val = byterev_2(regs->gpr[rd]);
			break;
#ifdef CONFIG_VSX
		case 844:	/* lxvd2x */
		case 876:	/* lxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(LOAD_VSX, u, 16);
			break;

		case 972:	/* stxvd2x */
		case 1004:	/* stxvd2ux */
			if (!(regs->msr & MSR_VSX))
				goto vsxunavail;
			op->reg = rd | ((instr & 1) << 5);
			op->type = MKOP(STORE_VSX, u, 16);
			break;

#endif /* CONFIG_VSX */
		}
		break;
	case 32:	/* lwz */
	case 33:	/* lwzu */
		op->type = MKOP(LOAD, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 34:	/* lbz */
	case 35:	/* lbzu */
		op->type = MKOP(LOAD, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 36:	/* stw */
	case 37:	/* stwu */
		op->type = MKOP(STORE, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 38:	/* stb */
	case 39:	/* stbu */
		op->type = MKOP(STORE, u, 1);
		op->ea = dform_ea(instr, regs);
		break;

	case 40:	/* lhz */
	case 41:	/* lhzu */
		op->type = MKOP(LOAD, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 42:	/* lha */
	case 43:	/* lhau */
		op->type = MKOP(LOAD, SIGNEXT | u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 44:	/* sth */
	case 45:	/* sthu */
		op->type = MKOP(STORE, u, 2);
		op->ea = dform_ea(instr, regs);
		break;

	case 46:	/* lmw */
		if (ra >= rd)
			break;		/* invalid form, ra in range to load */
		op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;

	case 47:	/* stmw */
		op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
		op->ea = dform_ea(instr, regs);
		break;
#ifdef CONFIG_PPC_FPU
	case 48:	/* lfs */
	case 49:	/* lfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 50:	/* lfd */
	case 51:	/* lfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(LOAD_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;

	case 52:	/* stfs */
	case 53:	/* stfsu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 4);
		op->ea = dform_ea(instr, regs);
		break;

	case 54:	/* stfd */
	case 55:	/* stfdu */
		if (!(regs->msr & MSR_FP))
			goto fpunavail;
		op->type = MKOP(STORE_FP, u, 8);
		op->ea = dform_ea(instr, regs);
		break;
#endif
#ifdef __powerpc64__
	case 58:	/* ld[u], lwa */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* ld */
			op->type = MKOP(LOAD, 0, 8);
			break;
		case 1:		/* ldu */
			op->type = MKOP(LOAD, UPDATE, 8);
			break;
		case 2:		/* lwa */
			op->type = MKOP(LOAD, SIGNEXT, 4);
			break;
		}
		break;

	case 62:	/* std[u] */
		op->ea = dsform_ea(instr, regs);
		switch (instr & 3) {
		case 0:		/* std */
			op->type = MKOP(STORE, 0, 8);
			break;
		case 1:		/* stdu */
			op->type = MKOP(STORE, UPDATE, 8);
			break;
		}
		break;
#endif /* __powerpc64__ */
	}
	return 0;
 logical_done:
	if (instr & 1)
		set_cr0(regs, ra);
	goto instr_done;

 arith_done:
	if (instr & 1)
		set_cr0(regs, rd);

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;

 priv:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGPRIV;
	return 0;

 trap:
	op->type = INTERRUPT | 0x700;
	op->val = SRR1_PROGTRAP;
	return 0;

#ifdef CONFIG_PPC_FPU
 fpunavail:
	op->type = INTERRUPT | 0x800;
	return 0;
#endif

#ifdef CONFIG_ALTIVEC
 vecunavail:
	op->type = INTERRUPT | 0xf20;
	return 0;
#endif

#ifdef CONFIG_VSX
 vsxunavail:
	op->type = INTERRUPT | 0xf40;
	return 0;
#endif
}
EXPORT_SYMBOL_GPL(analyse_instr);
NOKPROBE_SYMBOL(analyse_instr);
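
/*
 * Minimal usage sketch (illustrative; an assumption about a typical
 * caller, not code from this file):
 *
 *	struct instruction_op op;
 *	int r = analyse_instr(&op, regs, instr);
 *
 *	if (r == 1)
 *		return;			(already executed, regs->nip advanced)
 *	if ((op.type & INSTR_TYPE_MASK) == LOAD)
 *		do_load(op.ea, GETSIZE(op.type), op.reg);
 *
 * Here do_load() is a hypothetical helper; op.ea, op.reg and GETSIZE()
 * come from asm/sstep.h.
 */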
/*
 * For PPC32 we always use stwu with r1 to change the stack pointer,
 * so this emulated store may corrupt the exception frame; we have to
 * provide the exception-frame trampoline, which is pushed below the
 * kprobed function's stack. So we only update gpr[1] here and don't
 * emulate the actual store operation; the real store is done safely
 * in the exception return code, which checks this flag.
 */
static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	/*
	 * Check if we would overflow the kernel stack.
	 */
	if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
		printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
		return -EINVAL;
	}
#endif /* CONFIG_PPC32 */
	/*
	 * Check if the flag is already set, since that means we'd
	 * lose the previously saved value.
	 */
	WARN_ON(test_thread_flag(TIF_EMULATE_STACK_STORE));
	set_thread_flag(TIF_EMULATE_STACK_STORE);
	return 0;
}
static nokprobe_inline void do_signext(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = (signed short) *valp;
		break;
	case 4:
		*valp = (signed int) *valp;
		break;
	}
}
static nokprobe_inline void do_byterev(unsigned long *valp, int size)
{
	switch (size) {
	case 2:
		*valp = byterev_2(*valp);
		break;
	case 4:
		*valp = byterev_4(*valp);
		break;
#ifdef __powerpc64__
	case 8:
		*valp = byterev_8(*valp);
		break;
#endif
	}
}
/*
 * Emulate instructions that cause a transfer of control,
 * loads and stores, and a few other instructions.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int emulate_step(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;
	int r, err, size;
	unsigned long val;
	unsigned int cr;
	int i, rd, nb;

	r = analyse_instr(&op, regs, instr);
	if (r != 0)
		return r;

	err = 0;
	size = GETSIZE(op.type);
	switch (op.type & INSTR_TYPE_MASK) {
	case CACHEOP:
		if (!address_ok(regs, op.ea, 8))
			return 0;
		switch (op.type & CACHEOP_MASK) {
		case DCBST:
			__cacheop_user_asmx(op.ea, err, "dcbst");
			break;
		case DCBF:
			__cacheop_user_asmx(op.ea, err, "dcbf");
			break;
		case DCBTST:
			if (op.reg == 0)
				prefetchw((void *) op.ea);
			break;
		case DCBT:
			if (op.reg == 0)
				prefetch((void *) op.ea);
			break;
		case ICBI:
			__cacheop_user_asmx(op.ea, err, "icbi");
			break;
		}
		if (err)
			return 0;
		goto instr_done;
	case LARX:
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, op.ea, size))
			return 0;
		err = 0;
		switch (size) {
		case 4:
			__get_user_asmx(val, op.ea, err, "lwarx");
			break;
#ifdef __powerpc64__
		case 8:
			__get_user_asmx(val, op.ea, err, "ldarx");
			break;
#endif
		default:
			return 0;
		}
		if (!err)
			regs->gpr[op.reg] = val;
		goto ldst_done;

	case STCX:
		if (op.ea & (size - 1))
			break;		/* can't handle misaligned */
		if (!address_ok(regs, op.ea, size))
			return 0;
		err = 0;
		switch (size) {
		case 4:
			__put_user_asmx(op.val, op.ea, err, "stwcx.", cr);
			break;
#ifdef __powerpc64__
		case 8:
			__put_user_asmx(op.val, op.ea, err, "stdcx.", cr);
			break;
#endif
		default:
			return 0;
		}
		if (!err)
			regs->ccr = (regs->ccr & 0x0fffffff) |
				(cr & 0xe0000000) |
				((regs->xer >> 3) & 0x10000000);
		goto ldst_done;

	case LOAD:
		err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
		if (!err) {
			if (op.type & SIGNEXT)
				do_signext(&regs->gpr[op.reg], size);
			if (op.type & BYTEREV)
				do_byterev(&regs->gpr[op.reg], size);
		}
		goto ldst_done;
#ifdef CONFIG_PPC_FPU
	case LOAD_FP:
		if (size == 4)
			err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
		else
			err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case LOAD_VMX:
		err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case LOAD_VSX:
		err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case LOAD_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			nb = size - i;
			if (nb > 4)
				nb = 4;
			err = read_mem(&regs->gpr[rd], op.ea, nb, regs);
			if (err)
				return 0;
			if (nb < 4)	/* left-justify last bytes */
				regs->gpr[rd] <<= 32 - 8 * nb;
			op.ea += 4;
			++rd;
		}
		goto instr_done;
	case STORE:
		if ((op.type & UPDATE) && size == sizeof(long) &&
		    op.reg == 1 && op.update_reg == 1 &&
		    !(regs->msr & MSR_PR) &&
		    op.ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
			err = handle_stack_update(op.ea, regs);
			goto ldst_done;
		}
		err = write_mem(op.val, op.ea, size, regs);
		goto ldst_done;

#ifdef CONFIG_PPC_FPU
	case STORE_FP:
		if (size == 4)
			err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
		else
			err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_ALTIVEC
	case STORE_VMX:
		err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
		goto ldst_done;
#endif
#ifdef CONFIG_VSX
	case STORE_VSX:
		err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
		goto ldst_done;
#endif
	case STORE_MULTI:
		if (regs->msr & MSR_LE)
			return 0;
		rd = op.reg;
		for (i = 0; i < size; i += 4) {
			val = regs->gpr[rd];
			nb = size - i;
			if (nb > 4)
				nb = 4;
			else
				val >>= 32 - 8 * nb;
			err = write_mem(val, op.ea, nb, regs);
			if (err)
				return 0;
			op.ea += 4;
			++rd;
		}
		goto instr_done;
	case MFMSR:
		regs->gpr[op.reg] = regs->msr & MSR_MASK;
		goto instr_done;

	case MTMSR:
		val = regs->gpr[op.reg];
		if ((val & MSR_RI) == 0)
			/* can't step mtmsr[d] that would clear MSR_RI */
			return -1;
		/* here op.val is the mask of bits to change */
		regs->msr = (regs->msr & ~op.val) | (val & op.val);
		goto instr_done;

#ifdef CONFIG_PPC64
	case SYSCALL:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		if (regs->gpr[0] == 0x1ebe &&
		    cpu_has_feature(CPU_FTR_REAL_LE)) {
			regs->msr ^= MSR_LE;
			goto instr_done;
		}
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[10] = MSR_KERNEL;
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;

	case RFI:
		return -1;
#endif
	}
	return 0;

 ldst_done:
	if (err)
		return 0;
	if (op.type & UPDATE)
		regs->gpr[op.update_reg] = op.ea;

 instr_done:
	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
	return 1;
}
NOKPROBE_SYMBOL(emulate_step);
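
/*
 * Minimal usage sketch (illustrative; modelled on how a probe handler
 * can avoid hardware single-stepping, not code from this file):
 *
 *	switch (emulate_step(regs, instr)) {
 *	case 1:		(emulated; regs->nip already points past it)
 *		break;
 *	case 0:		(not emulated; single-step it in hardware)
 *		break;
 *	case -1:	(must not be stepped, e.g. rfid or a mtmsr[d]
 *			 that would clear MSR_RI)
 *		break;
 *	}
 */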