/*
 * align.c - address exception handler for M32R
 *
 * Copyright (c) 2003 Hitoshi Yamamoto
 */
#include <asm/ptrace.h>
#include <asm/uaccess.h>
static int get_reg(struct pt_regs *regs, int nr)
{
        int val;

        if (nr < 4)
                val = *(unsigned long *)(&regs->r0 + nr);
        else if (nr < 7)
                val = *(unsigned long *)(&regs->r4 + (nr - 4));
        else if (nr < 13)
                val = *(unsigned long *)(&regs->r7 + (nr - 7));
        else
                val = *(unsigned long *)(&regs->fp + (nr - 13));

        return val;
}
static void set_reg(struct pt_regs *regs, int nr, int val)
{
        if (nr < 4)
                *(unsigned long *)(&regs->r0 + nr) = val;
        else if (nr < 7)
                *(unsigned long *)(&regs->r4 + (nr - 4)) = val;
        else if (nr < 13)
                *(unsigned long *)(&regs->r7 + (nr - 7)) = val;
        else
                *(unsigned long *)(&regs->fp + (nr - 13)) = val;
}
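/*
 * Note (editorial, inferred from the ranges used above): register
 * numbers 0-3, 4-6 and 7-12 index three separately stored groups of
 * general registers in struct pt_regs, and numbers 13 and up are
 * addressed relative to the fp member.  The exact pt_regs layout is
 * defined in <asm/ptrace.h> and is assumed here, not shown.
 */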
#define REG1(insn)      (((insn) & 0x0f00) >> 8)
#define REG2(insn)      ((insn) & 0x000f)
#define PSW_BC          0x100   /* condition (BC) bit in pt_regs->psw */
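/*
 * Example (illustrative, not from the original source): the halfword
 * 0x23c5 matches ISA_LD1 (0x20c0) below, i.e. "ld r3, @r5";
 * REG1(0x23c5) == 3 selects the destination register and
 * REG2(0x23c5) == 5 selects the source register.
 */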
#define ISA_LD1         0x20c0  /* ld Rdest, @Rsrc */
#define ISA_LD2         0x20e0  /* ld Rdest, @Rsrc+ */
#define ISA_LDH         0x20a0  /* ldh Rdest, @Rsrc */
#define ISA_LDUH        0x20b0  /* lduh Rdest, @Rsrc */
#define ISA_ST1         0x2040  /* st Rsrc1, @Rsrc2 */
#define ISA_ST2         0x2060  /* st Rsrc1, @+Rsrc2 */
#define ISA_ST3         0x2070  /* st Rsrc1, @-Rsrc2 */
#define ISA_STH1        0x2020  /* sth Rsrc1, @Rsrc2 */
#define ISA_STH2        0x2030  /* sth Rsrc1, @Rsrc2+ */
#ifdef CONFIG_ISA_DUAL_ISSUE
#define ISA_ADD         0x00a0  /* add Rdest, Rsrc */
#define ISA_ADDI        0x4000  /* addi Rdest, #imm8 */
#define ISA_ADDX        0x0090  /* addx Rdest, Rsrc */
#define ISA_AND         0x00c0  /* and Rdest, Rsrc */
#define ISA_CMP         0x0040  /* cmp Rsrc1, Rsrc2 */
#define ISA_CMPEQ       0x0060  /* cmpeq Rsrc1, Rsrc2 */
#define ISA_CMPU        0x0050  /* cmpu Rsrc1, Rsrc2 */
#define ISA_CMPZ        0x0070  /* cmpz Rsrc */
#define ISA_LDI         0x6000  /* ldi Rdest, #imm8 */
#define ISA_MV          0x1080  /* mv Rdest, Rsrc */
#define ISA_NEG         0x0030  /* neg Rdest, Rsrc */
#define ISA_NOP         0x7000  /* nop */
#define ISA_NOT         0x00b0  /* not Rdest, Rsrc */
#define ISA_OR          0x00e0  /* or Rdest, Rsrc */
#define ISA_SUB         0x0020  /* sub Rdest, Rsrc */
#define ISA_SUBX        0x0010  /* subx Rdest, Rsrc */
#define ISA_XOR         0x00d0  /* xor Rdest, Rsrc */
#define ISA_MUL         0x1060  /* mul Rdest, Rsrc */
#define ISA_MULLO_A0    0x3010  /* mullo Rsrc1, Rsrc2, A0 */
#define ISA_MULLO_A1    0x3090  /* mullo Rsrc1, Rsrc2, A1 */
#define ISA_MVFACMI_A0  0x50f2  /* mvfacmi Rdest, A0 */
#define ISA_MVFACMI_A1  0x50f6  /* mvfacmi Rdest, A1 */
static int emu_addi(unsigned short insn, struct pt_regs *regs)
{
        char imm = (char)(insn & 0xff);
        int dest = REG1(insn);
        int val;

        val = get_reg(regs, dest);
        val += imm;
        set_reg(regs, dest, val);

        return 0;
}
static int emu_ldi(unsigned short insn, struct pt_regs *regs)
{
        char imm = (char)(insn & 0xff);

        set_reg(regs, REG1(insn), (int)imm);

        return 0;
}
static int emu_add(unsigned short insn, struct pt_regs *regs)
{
        int dest = REG1(insn);
        int src = REG2(insn);
        int val;

        val = get_reg(regs, dest);
        val += get_reg(regs, src);
        set_reg(regs, dest, val);

        return 0;
}
static int emu_addx(unsigned short insn, struct pt_regs *regs)
{
        int dest = REG1(insn);
        unsigned int val, tmp;

        val = regs->psw & PSW_BC ? 1 : 0;
        tmp = get_reg(regs, dest);
        val += tmp;
        val += (unsigned int)get_reg(regs, REG2(insn));
        set_reg(regs, dest, val);

        /* C bit set */
        if (val < tmp)
                regs->psw |= PSW_BC;
        else
                regs->psw &= ~(PSW_BC);

        return 0;
}
static int emu_and(unsigned short insn, struct pt_regs *regs)
{
        int dest = REG1(insn);
        int val;

        val = get_reg(regs, dest);
        val &= get_reg(regs, REG2(insn));
        set_reg(regs, dest, val);

        return 0;
}
static int emu_cmp(unsigned short insn, struct pt_regs *regs)
{
        if (get_reg(regs, REG1(insn)) < get_reg(regs, REG2(insn)))
                regs->psw |= PSW_BC;
        else
                regs->psw &= ~(PSW_BC);

        return 0;
}
static int emu_cmpeq(unsigned short insn, struct pt_regs *regs)
{
        if (get_reg(regs, REG1(insn)) == get_reg(regs, REG2(insn)))
                regs->psw |= PSW_BC;
        else
                regs->psw &= ~(PSW_BC);

        return 0;
}
static int emu_cmpu(unsigned short insn, struct pt_regs *regs)
{
        if ((unsigned int)get_reg(regs, REG1(insn))
                < (unsigned int)get_reg(regs, REG2(insn)))
                regs->psw |= PSW_BC;
        else
                regs->psw &= ~(PSW_BC);

        return 0;
}
static int emu_cmpz(unsigned short insn, struct pt_regs *regs)
{
        if (!get_reg(regs, REG2(insn)))
                regs->psw |= PSW_BC;
        else
                regs->psw &= ~(PSW_BC);

        return 0;
}
static int emu_mv(unsigned short insn, struct pt_regs *regs)
{
        int val;

        val = get_reg(regs, REG2(insn));
        set_reg(regs, REG1(insn), val);

        return 0;
}
static int emu_neg(unsigned short insn, struct pt_regs *regs)
{
        int val;

        val = get_reg(regs, REG2(insn));
        set_reg(regs, REG1(insn), 0 - val);

        return 0;
}
static int emu_not(unsigned short insn, struct pt_regs *regs)
{
        int val;

        val = get_reg(regs, REG2(insn));
        set_reg(regs, REG1(insn), ~val);

        return 0;
}
static int emu_or(unsigned short insn, struct pt_regs *regs)
{
        int dest = REG1(insn);
        int val;

        val = get_reg(regs, dest);
        val |= get_reg(regs, REG2(insn));
        set_reg(regs, dest, val);

        return 0;
}
static int emu_sub(unsigned short insn, struct pt_regs *regs)
{
        int dest = REG1(insn);
        int val;

        val = get_reg(regs, dest);
        val -= get_reg(regs, REG2(insn));
        set_reg(regs, dest, val);

        return 0;
}
static int emu_subx(unsigned short insn, struct pt_regs *regs)
{
        int dest = REG1(insn);
        unsigned int val, tmp;

        val = tmp = get_reg(regs, dest);
        val -= (unsigned int)get_reg(regs, REG2(insn));
        val -= regs->psw & PSW_BC ? 1 : 0;
        set_reg(regs, dest, val);

        /* C bit set */
        if (val > tmp)
                regs->psw |= PSW_BC;
        else
                regs->psw &= ~(PSW_BC);

        return 0;
}
static int emu_xor(unsigned short insn, struct pt_regs *regs)
{
        int dest = REG1(insn);
        unsigned int val;

        val = (unsigned int)get_reg(regs, dest);
        val ^= (unsigned int)get_reg(regs, REG2(insn));
        set_reg(regs, dest, val);

        return 0;
}
static int emu_mul(unsigned short insn, struct pt_regs *regs)
{
        int dest = REG1(insn);
        int reg1, reg2;

        reg1 = get_reg(regs, dest);
        reg2 = get_reg(regs, REG2(insn));

        /* perform the multiply on the host CPU */
        __asm__ __volatile__ (
                "mul    %0, %1;                 \n\t"
                : "+r" (reg1) : "r" (reg2)
        );

        set_reg(regs, dest, reg1);

        return 0;
}
static int emu_mullo_a0(unsigned short insn, struct pt_regs *regs)
{
        int reg1, reg2;

        reg1 = get_reg(regs, REG1(insn));
        reg2 = get_reg(regs, REG2(insn));

        __asm__ __volatile__ (
                "mullo          %0, %1, a0;     \n\t"
                "mvfachi        %0, a0;         \n\t"
                "mvfaclo        %1, a0;         \n\t"
                : "+r" (reg1), "+r" (reg2)
        );

        regs->acc0h = reg1;
        regs->acc0l = reg2;

        return 0;
}
static int emu_mullo_a1(unsigned short insn, struct pt_regs *regs)
{
        int reg1, reg2;

        reg1 = get_reg(regs, REG1(insn));
        reg2 = get_reg(regs, REG2(insn));

        __asm__ __volatile__ (
                "mullo          %0, %1, a0;     \n\t"
                "mvfachi        %0, a0;         \n\t"
                "mvfaclo        %1, a0;         \n\t"
                : "+r" (reg1), "+r" (reg2)
        );

        regs->acc1h = reg1;
        regs->acc1l = reg2;

        return 0;
}
static int emu_mvfacmi_a0(unsigned short insn, struct pt_regs *regs)
{
        unsigned long val;

        val = (regs->acc0h << 16) | (regs->acc0l >> 16);
        set_reg(regs, REG1(insn), (int)val);

        return 0;
}
static int emu_mvfacmi_a1(unsigned short insn, struct pt_regs *regs)
{
        unsigned long val;

        val = (regs->acc1h << 16) | (regs->acc1l >> 16);
        set_reg(regs, REG1(insn), (int)val);

        return 0;
}
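/*
 * Editorial note: emu_m32r2() below distinguishes the encodings by
 * mask width -- insn & 0x7000 isolates the imm8 forms (addi/ldi),
 * insn & 0x70f0 the register-register forms, and insn & 0x70ff the
 * mvfacmi forms, whose accumulator choice is encoded in the low byte.
 */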
static int emu_m32r2(unsigned short insn, struct pt_regs *regs)
{
        int res = -1;

        if ((insn & 0x7fff) == ISA_NOP) /* nop */
                return 0;

        switch (insn & 0x7000) {
        case ISA_ADDI:          /* addi Rdest, #imm8 */
                res = emu_addi(insn, regs);
                break;
        case ISA_LDI:           /* ldi Rdest, #imm8 */
                res = emu_ldi(insn, regs);
                break;
        default:
                break;
        }

        if (!res)
                return 0;

        switch (insn & 0x70f0) {
        case ISA_ADD:           /* add Rdest, Rsrc */
                res = emu_add(insn, regs);
                break;
        case ISA_ADDX:          /* addx Rdest, Rsrc */
                res = emu_addx(insn, regs);
                break;
        case ISA_AND:           /* and Rdest, Rsrc */
                res = emu_and(insn, regs);
                break;
        case ISA_CMP:           /* cmp Rsrc1, Rsrc2 */
                res = emu_cmp(insn, regs);
                break;
        case ISA_CMPEQ:         /* cmpeq Rsrc1, Rsrc2 */
                res = emu_cmpeq(insn, regs);
                break;
        case ISA_CMPU:          /* cmpu Rsrc1, Rsrc2 */
                res = emu_cmpu(insn, regs);
                break;
        case ISA_CMPZ:          /* cmpz Rsrc */
                res = emu_cmpz(insn, regs);
                break;
        case ISA_MV:            /* mv Rdest, Rsrc */
                res = emu_mv(insn, regs);
                break;
        case ISA_NEG:           /* neg Rdest, Rsrc */
                res = emu_neg(insn, regs);
                break;
        case ISA_NOT:           /* not Rdest, Rsrc */
                res = emu_not(insn, regs);
                break;
        case ISA_OR:            /* or Rdest, Rsrc */
                res = emu_or(insn, regs);
                break;
        case ISA_SUB:           /* sub Rdest, Rsrc */
                res = emu_sub(insn, regs);
                break;
        case ISA_SUBX:          /* subx Rdest, Rsrc */
                res = emu_subx(insn, regs);
                break;
        case ISA_XOR:           /* xor Rdest, Rsrc */
                res = emu_xor(insn, regs);
                break;
        case ISA_MUL:           /* mul Rdest, Rsrc */
                res = emu_mul(insn, regs);
                break;
        case ISA_MULLO_A0:      /* mullo Rsrc1, Rsrc2 */
                res = emu_mullo_a0(insn, regs);
                break;
        case ISA_MULLO_A1:      /* mullo Rsrc1, Rsrc2 */
                res = emu_mullo_a1(insn, regs);
                break;
        default:
                break;
        }

        if (!res)
                return 0;

        switch (insn & 0x70ff) {
        case ISA_MVFACMI_A0:    /* mvfacmi Rdest */
                res = emu_mvfacmi_a0(insn, regs);
                break;
        case ISA_MVFACMI_A1:    /* mvfacmi Rdest */
                res = emu_mvfacmi_a1(insn, regs);
                break;
        default:
                break;
        }

        return res;
}
#endif  /* CONFIG_ISA_DUAL_ISSUE */
/*
 * ld   : ?010 dest 1100 src
 *        0010 dest 1110 src : ld Rdest, @Rsrc+
 * ldh  : ?010 dest 1010 src
 * lduh : ?010 dest 1011 src
 * st   : ?010 src1 0100 src2
 *        0010 src1 0110 src2 : st Rsrc1, @+Rsrc2
 *        0010 src1 0111 src2 : st Rsrc1, @-Rsrc2
 * sth  : ?010 src1 0010 src2
 */
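/*
 * Example (editorial): insn16 = 0x2142 matches the "st" row above:
 * ?010 = 0010, src1 = 0001 (r1), opcode nibble 0100, src2 = 0010 (r2),
 * i.e. "st r1, @r2".
 */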
static int insn_check(unsigned long insn, struct pt_regs *regs,
        unsigned char **ucp)
{
        int res = 0;

        /*
         * 32bit insn
         *  ld Rdest, @(disp16, Rsrc)
         *  st Rdest, @(disp16, Rsrc)
         */
        if (insn & 0x80000000) {        /* 32bit insn */
                *ucp += (short)(insn & 0x0000ffff);
                regs->bpc += 4;
        } else {                        /* 16bit insn */
#ifdef CONFIG_ISA_DUAL_ISSUE
                /* parallel exec check */
                if (!(regs->bpc & 0x2) && insn & 0x8000) {
                        res = emu_m32r2((unsigned short)insn, regs);
                        regs->bpc += 4;
                } else
#endif /* CONFIG_ISA_DUAL_ISSUE */
                        regs->bpc += 2;
        }

        return res;
}
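/*
 * Example (editorial, derived from the table above and the checks in
 * handle_unaligned_access): the 32-bit word 0xa1c20006 has its MSB
 * set, its upper halfword 0xa1c2 selects "ld r1, @(disp,r2)", and
 * insn_check() adds the sign-extended low halfword (+6) to the user
 * address before the access is emulated.
 */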
static int emu_ld(unsigned long insn32, struct pt_regs *regs)
{
        unsigned char *ucp;
        unsigned long val;
        unsigned short insn16;
        int size, src;

        insn16 = insn32 >> 16;
        src = REG2(insn16);
        ucp = (unsigned char *)get_reg(regs, src);

        if (insn_check(insn32, regs, &ucp))
                return -1;

        size = insn16 & 0x0040 ? 4 : 2;
        if (copy_from_user(&val, ucp, size))
                return -1;

        if (size == 2)
                val >>= 16;

        /* ldh sign check */
        if ((insn16 & 0x00f0) == 0x00a0 && (val & 0x8000))
                val |= 0xffff0000;

        set_reg(regs, REG1(insn16), val);

        /* ld increment check */
        if ((insn16 & 0xf0f0) == ISA_LD2)       /* ld Rdest, @Rsrc+ */
                set_reg(regs, src, (unsigned long)(ucp + 4));

        return 0;
}
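/*
 * Example (editorial): for "ldh r3, @r5" with r5 holding an odd
 * address, two bytes are copied from user space into the upper half
 * of val (the CPU is big-endian), shifted down, and sign-extended
 * because the 0x00a0 sub-opcode marks a signed halfword load; "lduh"
 * (0x00b0) takes the same path without the sign extension.
 */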
static int emu_st(unsigned long insn32, struct pt_regs *regs)
{
        unsigned char *ucp;
        unsigned long val;
        unsigned short insn16;
        int size, src2;

        insn16 = insn32 >> 16;
        src2 = REG2(insn16);

        ucp = (unsigned char *)get_reg(regs, src2);

        if (insn_check(insn32, regs, &ucp))
                return -1;

        size = insn16 & 0x0040 ? 4 : 2;
        val = get_reg(regs, REG1(insn16));
        if (size == 2)
                val <<= 16;

        /* st inc/dec check */
        if ((insn16 & 0xf0e0) == 0x2060) {
                if (insn16 & 0x0010)
                        ucp -= 4;
                else
                        ucp += 4;

                set_reg(regs, src2, (unsigned long)ucp);
        }

        if (copy_to_user(ucp, &val, size))
                return -1;

        /* sth increment check */
        if ((insn16 & 0xf0f0) == ISA_STH2) {
                ucp += 2;
                set_reg(regs, src2, (unsigned long)ucp);
        }

        return 0;
}
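/*
 * Example (editorial): "st r1, @+r2" (ISA_ST2) bumps r2 by 4 before
 * the store and "st r1, @-r2" (ISA_ST3) moves it down by 4; in both
 * cases the updated pointer is written back to Rsrc2 before
 * copy_to_user() performs the actual store.
 */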
int handle_unaligned_access(unsigned long insn32, struct pt_regs *regs)
{
        unsigned short insn16;
        int res;

        insn16 = insn32 >> 16;

        /* ld or st check */
        if ((insn16 & 0x7000) != 0x2000)
                return -1;

        /* insn alignment check */
        if ((insn16 & 0x8000) && (regs->bpc & 3))
                return -1;

        if (insn16 & 0x0080)    /* ld */
                res = emu_ld(insn32, regs);
        else                    /* st */
                res = emu_st(insn32, regs);

        return res;
}
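/*
 * Editorial note: this entry point is expected to be called from the
 * architecture's address-exception path (an assumption; the caller is
 * not part of this file) with the faulting instruction word and the
 * saved register frame.  A non-zero return means the instruction
 * could not be emulated and the caller should fall back to its normal
 * fault handling.
 */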