// SPDX-License-Identifier: GPL-2.0
/*
 * align.c - address exception handler for M32R
 *
 * Copyright (c) 2003 Hitoshi Yamamoto
 */

#include <asm/ptrace.h>
#include <linux/uaccess.h>
11 static int get_reg(struct pt_regs
*regs
, int nr
)
16 val
= *(unsigned long *)(®s
->r0
+ nr
);
18 val
= *(unsigned long *)(®s
->r4
+ (nr
- 4));
20 val
= *(unsigned long *)(®s
->r7
+ (nr
- 7));
22 val
= *(unsigned long *)(®s
->fp
+ (nr
- 13));
27 static void set_reg(struct pt_regs
*regs
, int nr
, int val
)
30 *(unsigned long *)(®s
->r0
+ nr
) = val
;
32 *(unsigned long *)(®s
->r4
+ (nr
- 4)) = val
;
34 *(unsigned long *)(®s
->r7
+ (nr
- 7)) = val
;
36 *(unsigned long *)(®s
->fp
+ (nr
- 13)) = val
;
/* register-number fields of a 16bit instruction word */
#define REG1(insn)	(((insn) & 0x0f00) >> 8)	/* Rdest / Rsrc1 */
#define REG2(insn)	((insn) & 0x000f)		/* Rsrc / Rsrc2 */
/* opcode patterns of the ld/st instruction group (upper 16 bits) */
#define ISA_LD1		0x20c0	/* ld Rdest, @Rsrc */
#define ISA_LD2		0x20e0	/* ld Rdest, @Rsrc+ */
#define ISA_LDH		0x20a0	/* ldh Rdest, @Rsrc */
#define ISA_LDUH	0x20b0	/* lduh Rdest, @Rsrc */
#define ISA_ST1		0x2040	/* st Rsrc1, @Rsrc2 */
#define ISA_ST2		0x2060	/* st Rsrc1, @+Rsrc2 */
#define ISA_ST3		0x2070	/* st Rsrc1, @-Rsrc2 */
#define ISA_STH1	0x2020	/* sth Rsrc1, @Rsrc2 */
#define ISA_STH2	0x2030	/* sth Rsrc1, @Rsrc2+ */
54 #ifdef CONFIG_ISA_DUAL_ISSUE
57 #define ISA_ADD 0x00a0 /* add Rdest, Rsrc */
58 #define ISA_ADDI 0x4000 /* addi Rdest, #imm8 */
59 #define ISA_ADDX 0x0090 /* addx Rdest, Rsrc */
60 #define ISA_AND 0x00c0 /* and Rdest, Rsrc */
61 #define ISA_CMP 0x0040 /* cmp Rsrc1, Rsrc2 */
62 #define ISA_CMPEQ 0x0060 /* cmpeq Rsrc1, Rsrc2 */
63 #define ISA_CMPU 0x0050 /* cmpu Rsrc1, Rsrc2 */
64 #define ISA_CMPZ 0x0070 /* cmpz Rsrc */
65 #define ISA_LDI 0x6000 /* ldi Rdest, #imm8 */
66 #define ISA_MV 0x1080 /* mv Rdest, Rsrc */
67 #define ISA_NEG 0x0030 /* neg Rdest, Rsrc */
68 #define ISA_NOP 0x7000 /* nop */
69 #define ISA_NOT 0x00b0 /* not Rdest, Rsrc */
70 #define ISA_OR 0x00e0 /* or Rdest, Rsrc */
71 #define ISA_SUB 0x0020 /* sub Rdest, Rsrc */
72 #define ISA_SUBX 0x0010 /* subx Rdest, Rsrc */
73 #define ISA_XOR 0x00d0 /* xor Rdest, Rsrc */
76 #define ISA_MUL 0x1060 /* mul Rdest, Rsrc */
77 #define ISA_MULLO_A0 0x3010 /* mullo Rsrc1, Rsrc2, A0 */
78 #define ISA_MULLO_A1 0x3090 /* mullo Rsrc1, Rsrc2, A1 */
79 #define ISA_MVFACMI_A0 0x50f2 /* mvfacmi Rdest, A0 */
80 #define ISA_MVFACMI_A1 0x50f6 /* mvfacmi Rdest, A1 */
/*
 * emu_addi - emulate "addi Rdest, #imm8": Rdest += sign-extended imm8.
 * Returns 0 (always succeeds).
 */
static int emu_addi(unsigned short insn, struct pt_regs *regs)
{
	char imm = (char)(insn & 0xff);	/* low byte; relies on signed char */
	int dest = REG1(insn);
	int val;

	val = get_reg(regs, dest);
	val += imm;
	set_reg(regs, dest, val);

	return 0;
}
/*
 * emu_ldi - emulate "ldi Rdest, #imm8": Rdest = sign-extended imm8.
 * Returns 0 (always succeeds).
 */
static int emu_ldi(unsigned short insn, struct pt_regs *regs)
{
	char imm = (char)(insn & 0xff);	/* low byte; relies on signed char */

	set_reg(regs, REG1(insn), (int)imm);

	return 0;
}
/*
 * emu_add - emulate "add Rdest, Rsrc": Rdest += Rsrc.
 * Returns 0 (always succeeds).
 */
static int emu_add(unsigned short insn, struct pt_regs *regs)
{
	int dest = REG1(insn);
	int src = REG2(insn);
	int val;

	val = get_reg(regs, dest);
	val += get_reg(regs, src);
	set_reg(regs, dest, val);

	return 0;
}
117 static int emu_addx(unsigned short insn
, struct pt_regs
*regs
)
119 int dest
= REG1(insn
);
120 unsigned int val
, tmp
;
122 val
= regs
->psw
& PSW_BC
? 1 : 0;
123 tmp
= get_reg(regs
, dest
);
125 val
+= (unsigned int)get_reg(regs
, REG2(insn
));
126 set_reg(regs
, dest
, val
);
132 regs
->psw
&= ~(PSW_BC
);
/*
 * emu_and - emulate "and Rdest, Rsrc": Rdest &= Rsrc.
 * Returns 0 (always succeeds).
 */
static int emu_and(unsigned short insn, struct pt_regs *regs)
{
	int dest = REG1(insn);
	int val;

	val = get_reg(regs, dest);
	val &= get_reg(regs, REG2(insn));
	set_reg(regs, dest, val);

	return 0;
}
149 static int emu_cmp(unsigned short insn
, struct pt_regs
*regs
)
151 if (get_reg(regs
, REG1(insn
)) < get_reg(regs
, REG2(insn
)))
154 regs
->psw
&= ~(PSW_BC
);
159 static int emu_cmpeq(unsigned short insn
, struct pt_regs
*regs
)
161 if (get_reg(regs
, REG1(insn
)) == get_reg(regs
, REG2(insn
)))
164 regs
->psw
&= ~(PSW_BC
);
169 static int emu_cmpu(unsigned short insn
, struct pt_regs
*regs
)
171 if ((unsigned int)get_reg(regs
, REG1(insn
))
172 < (unsigned int)get_reg(regs
, REG2(insn
)))
175 regs
->psw
&= ~(PSW_BC
);
180 static int emu_cmpz(unsigned short insn
, struct pt_regs
*regs
)
182 if (!get_reg(regs
, REG2(insn
)))
185 regs
->psw
&= ~(PSW_BC
);
/*
 * emu_mv - emulate "mv Rdest, Rsrc": Rdest = Rsrc.
 * Returns 0 (always succeeds).
 */
static int emu_mv(unsigned short insn, struct pt_regs *regs)
{
	int val;

	val = get_reg(regs, REG2(insn));
	set_reg(regs, REG1(insn), val);

	return 0;
}
/*
 * emu_neg - emulate "neg Rdest, Rsrc": Rdest = -Rsrc.
 * Returns 0 (always succeeds).
 */
static int emu_neg(unsigned short insn, struct pt_regs *regs)
{
	int val;

	val = get_reg(regs, REG2(insn));
	set_reg(regs, REG1(insn), 0 - val);

	return 0;
}
/*
 * emu_not - emulate "not Rdest, Rsrc": Rdest = ~Rsrc.
 * Returns 0 (always succeeds).
 */
static int emu_not(unsigned short insn, struct pt_regs *regs)
{
	int val;

	val = get_reg(regs, REG2(insn));
	set_reg(regs, REG1(insn), ~val);

	return 0;
}
/*
 * emu_or - emulate "or Rdest, Rsrc": Rdest |= Rsrc.
 * Returns 0 (always succeeds).
 */
static int emu_or(unsigned short insn, struct pt_regs *regs)
{
	int dest = REG1(insn);
	int val;

	val = get_reg(regs, dest);
	val |= get_reg(regs, REG2(insn));
	set_reg(regs, dest, val);

	return 0;
}
/*
 * emu_sub - emulate "sub Rdest, Rsrc": Rdest -= Rsrc.
 * Returns 0 (always succeeds).
 */
static int emu_sub(unsigned short insn, struct pt_regs *regs)
{
	int dest = REG1(insn);
	int val;

	val = get_reg(regs, dest);
	val -= get_reg(regs, REG2(insn));
	set_reg(regs, dest, val);

	return 0;
}
244 static int emu_subx(unsigned short insn
, struct pt_regs
*regs
)
246 int dest
= REG1(insn
);
247 unsigned int val
, tmp
;
249 val
= tmp
= get_reg(regs
, dest
);
250 val
-= (unsigned int)get_reg(regs
, REG2(insn
));
251 val
-= regs
->psw
& PSW_BC
? 1 : 0;
252 set_reg(regs
, dest
, val
);
258 regs
->psw
&= ~(PSW_BC
);
/*
 * emu_xor - emulate "xor Rdest, Rsrc": Rdest ^= Rsrc.
 * Returns 0 (always succeeds).
 */
static int emu_xor(unsigned short insn, struct pt_regs *regs)
{
	int dest = REG1(insn);
	unsigned int val;

	val = (unsigned int)get_reg(regs, dest);
	val ^= (unsigned int)get_reg(regs, REG2(insn));
	set_reg(regs, dest, val);

	return 0;
}
/*
 * emu_mul - emulate "mul Rdest, Rsrc" by executing a real mul
 * instruction on the handling CPU and writing the low word back.
 * Returns 0 (always succeeds).
 */
static int emu_mul(unsigned short insn, struct pt_regs *regs)
{
	int dest = REG1(insn);
	int reg1, reg2;

	reg1 = get_reg(regs, dest);
	reg2 = get_reg(regs, REG2(insn));

	/* mul instruction */
	__asm__ __volatile__ (
		"mul	%0, %1;	\n\t"
		: "+r" (reg1) : "r" (reg2)
	);

	set_reg(regs, dest, reg1);

	return 0;
}
293 static int emu_mullo_a0(unsigned short insn
, struct pt_regs
*regs
)
297 reg1
= get_reg(regs
, REG1(insn
));
298 reg2
= get_reg(regs
, REG2(insn
));
300 __asm__
__volatile__ (
301 "mullo %0, %1, a0; \n\t"
302 "mvfachi %0, a0; \n\t"
303 "mvfaclo %1, a0; \n\t"
304 : "+r" (reg1
), "+r" (reg2
)
313 static int emu_mullo_a1(unsigned short insn
, struct pt_regs
*regs
)
317 reg1
= get_reg(regs
, REG1(insn
));
318 reg2
= get_reg(regs
, REG2(insn
));
320 __asm__
__volatile__ (
321 "mullo %0, %1, a0; \n\t"
322 "mvfachi %0, a0; \n\t"
323 "mvfaclo %1, a0; \n\t"
324 : "+r" (reg1
), "+r" (reg2
)
333 static int emu_mvfacmi_a0(unsigned short insn
, struct pt_regs
*regs
)
337 val
= (regs
->acc0h
<< 16) | (regs
->acc0l
>> 16);
338 set_reg(regs
, REG1(insn
), (int)val
);
343 static int emu_mvfacmi_a1(unsigned short insn
, struct pt_regs
*regs
)
347 val
= (regs
->acc1h
<< 16) | (regs
->acc1l
>> 16);
348 set_reg(regs
, REG1(insn
), (int)val
);
353 static int emu_m32r2(unsigned short insn
, struct pt_regs
*regs
)
357 if ((insn
& 0x7fff) == ISA_NOP
) /* nop */
360 switch(insn
& 0x7000) {
361 case ISA_ADDI
: /* addi Rdest, #imm8 */
362 res
= emu_addi(insn
, regs
);
364 case ISA_LDI
: /* ldi Rdest, #imm8 */
365 res
= emu_ldi(insn
, regs
);
374 switch(insn
& 0x70f0) {
375 case ISA_ADD
: /* add Rdest, Rsrc */
376 res
= emu_add(insn
, regs
);
378 case ISA_ADDX
: /* addx Rdest, Rsrc */
379 res
= emu_addx(insn
, regs
);
381 case ISA_AND
: /* and Rdest, Rsrc */
382 res
= emu_and(insn
, regs
);
384 case ISA_CMP
: /* cmp Rsrc1, Rsrc2 */
385 res
= emu_cmp(insn
, regs
);
387 case ISA_CMPEQ
: /* cmpeq Rsrc1, Rsrc2 */
388 res
= emu_cmpeq(insn
, regs
);
390 case ISA_CMPU
: /* cmpu Rsrc1, Rsrc2 */
391 res
= emu_cmpu(insn
, regs
);
393 case ISA_CMPZ
: /* cmpz Rsrc */
394 res
= emu_cmpz(insn
, regs
);
396 case ISA_MV
: /* mv Rdest, Rsrc */
397 res
= emu_mv(insn
, regs
);
399 case ISA_NEG
: /* neg Rdest, Rsrc */
400 res
= emu_neg(insn
, regs
);
402 case ISA_NOT
: /* not Rdest, Rsrc */
403 res
= emu_not(insn
, regs
);
405 case ISA_OR
: /* or Rdest, Rsrc */
406 res
= emu_or(insn
, regs
);
408 case ISA_SUB
: /* sub Rdest, Rsrc */
409 res
= emu_sub(insn
, regs
);
411 case ISA_SUBX
: /* subx Rdest, Rsrc */
412 res
= emu_subx(insn
, regs
);
414 case ISA_XOR
: /* xor Rdest, Rsrc */
415 res
= emu_xor(insn
, regs
);
417 case ISA_MUL
: /* mul Rdest, Rsrc */
418 res
= emu_mul(insn
, regs
);
420 case ISA_MULLO_A0
: /* mullo Rsrc1, Rsrc2 */
421 res
= emu_mullo_a0(insn
, regs
);
423 case ISA_MULLO_A1
: /* mullo Rsrc1, Rsrc2 */
424 res
= emu_mullo_a1(insn
, regs
);
433 switch(insn
& 0x70ff) {
434 case ISA_MVFACMI_A0
: /* mvfacmi Rdest */
435 res
= emu_mvfacmi_a0(insn
, regs
);
437 case ISA_MVFACMI_A1
: /* mvfacmi Rdest */
438 res
= emu_mvfacmi_a1(insn
, regs
);
#endif	/* CONFIG_ISA_DUAL_ISSUE */
/*
 * ld   : ?010 dest 1100 src
 *        0010 dest 1110 src : ld Rdest, @Rsrc+
 * ldh  : ?010 dest 1010 src
 * lduh : ?010 dest 1011 src
 * st   : ?010 src1 0100 src2
 *        0010 src1 0110 src2 : st Rsrc1, @+Rsrc2
 *        0010 src1 0111 src2 : st Rsrc1, @-Rsrc2
 * sth  : ?010 src1 0010 src2
 */
460 static int insn_check(unsigned long insn
, struct pt_regs
*regs
,
467 * ld Rdest, @(disp16, Rsrc)
468 * st Rdest, @(disp16, Rsrc)
470 if (insn
& 0x80000000) { /* 32bit insn */
471 *ucp
+= (short)(insn
& 0x0000ffff);
473 } else { /* 16bit insn */
474 #ifdef CONFIG_ISA_DUAL_ISSUE
475 /* parallel exec check */
476 if (!(regs
->bpc
& 0x2) && insn
& 0x8000) {
477 res
= emu_m32r2((unsigned short)insn
, regs
);
480 #endif /* CONFIG_ISA_DUAL_ISSUE */
487 static int emu_ld(unsigned long insn32
, struct pt_regs
*regs
)
491 unsigned short insn16
;
494 insn16
= insn32
>> 16;
496 ucp
= (unsigned char *)get_reg(regs
, src
);
498 if (insn_check(insn32
, regs
, &ucp
))
501 size
= insn16
& 0x0040 ? 4 : 2;
502 if (copy_from_user(&val
, ucp
, size
))
509 if ((insn16
& 0x00f0) == 0x00a0 && (val
& 0x8000))
512 set_reg(regs
, REG1(insn16
), val
);
514 /* ld increment check */
515 if ((insn16
& 0xf0f0) == ISA_LD2
) /* ld Rdest, @Rsrc+ */
516 set_reg(regs
, src
, (unsigned long)(ucp
+ 4));
521 static int emu_st(unsigned long insn32
, struct pt_regs
*regs
)
525 unsigned short insn16
;
528 insn16
= insn32
>> 16;
531 ucp
= (unsigned char *)get_reg(regs
, src2
);
533 if (insn_check(insn32
, regs
, &ucp
))
536 size
= insn16
& 0x0040 ? 4 : 2;
537 val
= get_reg(regs
, REG1(insn16
));
541 /* st inc/dec check */
542 if ((insn16
& 0xf0e0) == 0x2060) {
548 set_reg(regs
, src2
, (unsigned long)ucp
);
551 if (copy_to_user(ucp
, &val
, size
))
555 if ((insn16
& 0xf0f0) == ISA_STH2
) {
557 set_reg(regs
, src2
, (unsigned long)ucp
);
563 int handle_unaligned_access(unsigned long insn32
, struct pt_regs
*regs
)
565 unsigned short insn16
;
568 insn16
= insn32
>> 16;
571 if ((insn16
& 0x7000) != 0x2000)
574 /* insn alignment check */
575 if ((insn16
& 0x8000) && (regs
->bpc
& 3))
578 if (insn16
& 0x0080) /* ld */
579 res
= emu_ld(insn32
, regs
);
581 res
= emu_st(insn32
, regs
);