/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * A small micro-assembler. It is intentionally kept simple, supports only
 * a subset of instructions, and does not try to hide pipeline effects
 * like branch delay slots.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#define IMM_MASK	0xffff
#define JIMM_MASK	0x3ffffff
#define FUNC_MASK	0x3f
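/*
 * Worked example (added for clarity, not part of the original source):
 * these masks match the MIPS field widths -- a 16-bit immediate, a
 * 26-bit jump index and a 6-bit function code.  A signed immediate of
 * -4 is therefore stored as
 *
 *	((u32)-4) & IMM_MASK == 0xfffc
 *
 * which the hardware sign-extends back to -4 at execution time.
 */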
enum opcode {
	insn_invalid,
	insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
	insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
	insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0,
	insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32,
	insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld,
	insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0,
	insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
	insn_sra, insn_srl, insn_subu, insn_sw, insn_tlbp, insn_tlbwi,
	insn_tlbwr, insn_xor, insn_xori
};
/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f)					\
	((a) << OP_SH | (b) << RS_SH | (c) << RT_SH |		\
	 (d) << RD_SH | (e) << RE_SH | (f) << FUNC_SH)
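/*
 * Illustrative note (added, not part of the original source): each
 * insn_table[] entry pairs a fixed-bit template built by M() with the
 * set of fields that still vary.  For instance the entry
 *
 *	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }
 *
 * fixes the SPECIAL major opcode and the addu function code, and tells
 * build_insn() to fill in three register operands at build time.
 */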
static struct insn insn_table[] __cpuinitdata = {
	{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
	{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
	{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
	{ insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
	{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
	{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
	{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
	{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET },
	{ insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET },
	{ insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
	{ insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
	{ insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
	{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
	{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
	{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
	{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
	{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
	{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET },
	{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET },
	{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
	{ insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
	{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
	{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
	{ insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
	{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
	{ insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
	{ insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
	{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
	{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_invalid, 0, 0 }
};
static inline __cpuinit u32 build_rs(u32 arg)
{
	if (arg & ~RS_MASK)
		printk(KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RS_MASK) << RS_SH;
}

static inline __cpuinit u32 build_rt(u32 arg)
{
	if (arg & ~RT_MASK)
		printk(KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RT_MASK) << RT_SH;
}
static inline __cpuinit u32 build_rd(u32 arg)
{
	if (arg & ~RD_MASK)
		printk(KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RD_MASK) << RD_SH;
}

static inline __cpuinit u32 build_re(u32 arg)
{
	if (arg & ~RE_MASK)
		printk(KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RE_MASK) << RE_SH;
}
static inline __cpuinit u32 build_simm(s32 arg)
{
	if (arg > 0x7fff || arg < -0x8000)
		printk(KERN_WARNING "Micro-assembler field overflow\n");

	return arg & IMM_MASK;
}
static inline __cpuinit u32 build_uimm(u32 arg)
{
	if (arg & ~IMM_MASK)
		printk(KERN_WARNING "Micro-assembler field overflow\n");

	return arg & IMM_MASK;
}
static inline __cpuinit u32 build_bimm(s32 arg)
{
	if (arg > 0x1ffff || arg < -0x20000)
		printk(KERN_WARNING "Micro-assembler field overflow\n");

	if (arg & 0x3)
		printk(KERN_WARNING "Invalid micro-assembler branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}
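/*
 * Worked example (added, not part of the original source): for a
 * backward branch of 8 bytes, __resolve_relocs() below passes
 * arg == -8, and
 *
 *	build_bimm(-8) == (1 << 15) | ((-8 >> 2) & 0x7fff) == 0xfffe
 *
 * i.e. the 16-bit two's complement encoding of the word offset -2,
 * relative to the instruction after the branch (the delay slot).
 */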
static inline __cpuinit u32 build_jimm(u32 arg)
{
	if (arg & ~((JIMM_MASK) << 2))
		printk(KERN_WARNING "Micro-assembler field overflow\n");

	return (arg >> 2) & JIMM_MASK;
}
static inline __cpuinit u32 build_func(u32 arg)
{
	if (arg & ~FUNC_MASK)
		printk(KERN_WARNING "Micro-assembler field overflow\n");

	return arg & FUNC_MASK;
}
static inline __cpuinit u32 build_set(u32 arg)
{
	if (arg & ~SET_MASK)
		printk(KERN_WARNING "Micro-assembler field overflow\n");

	return arg & SET_MASK;
}
/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
{
	struct insn *ip = NULL;
	unsigned int i;
	va_list ap;
	u32 op;

	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
		if (insn_table[i].opcode == opc) {
			ip = &insn_table[i];
			break;
		}

	if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
		panic("Unsupported Micro-assembler instruction %d", opc);

	va_start(ap, opc);
	op = ip->match;
	if (ip->fields & RS)
		op |= build_rs(va_arg(ap, u32));
	if (ip->fields & RT)
		op |= build_rt(va_arg(ap, u32));
	if (ip->fields & RD)
		op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE)
		op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM)
		op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM)
		op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM)
		op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM)
		op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC)
		op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET)
		op |= build_set(va_arg(ap, u32));
	va_end(ap);

	**buf = op;
	(*buf)++;
}
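/*
 * Illustrative sketch (added, not part of the original source): with
 * the table above, a call such as
 *
 *	build_insn(buf, insn_addiu, 4, 5, -4);
 *
 * selects the insn_addiu template, consumes the varargs in RS, RT,
 * SIMM order, and stores the word encoding "addiu $5, $4, -4" at *buf,
 * advancing the buffer pointer by one instruction.
 */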
#define I_u1u2u3(op)					\
Ip_u1u2u3(op)						\
	{ build_insn(buf, insn##op, a, b, c); }

#define I_u2u1u3(op)					\
Ip_u2u1u3(op)						\
	{ build_insn(buf, insn##op, b, a, c); }

#define I_u3u1u2(op)					\
Ip_u3u1u2(op)						\
	{ build_insn(buf, insn##op, b, c, a); }

#define I_u1u2s3(op)					\
Ip_u1u2s3(op)						\
	{ build_insn(buf, insn##op, a, b, c); }

#define I_u2s3u1(op)					\
Ip_u2s3u1(op)						\
	{ build_insn(buf, insn##op, c, a, b); }

#define I_u2u1s3(op)					\
Ip_u2u1s3(op)						\
	{ build_insn(buf, insn##op, b, a, c); }

#define I_u1u2(op)					\
Ip_u1u2(op)						\
	{ build_insn(buf, insn##op, a, b); }

#define I_u1s2(op)					\
Ip_u1s2(op)						\
	{ build_insn(buf, insn##op, a, b); }

#define I_u1(op)					\
Ip_u1(op)						\
	{ build_insn(buf, insn##op, a); }

#define I_0(op)						\
Ip_0(op)						\
	{ build_insn(buf, insn##op); }
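/*
 * Illustrative note (added, not part of the original source): the
 * Ip_*() prototype macros come from the uasm header, and the original
 * file instantiates these helpers further down, e.g. I_u2u1s3(_addiu)
 * defines a uasm_i_addiu() emitter whose user-visible operand order is
 * "rt, rs, imm" but which calls build_insn(buf, insn_addiu, rs, rt, imm);
 * the macros merely reorder operands into the RS-first order that
 * build_insn() expects.
 */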
void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
	(*lab)->addr = addr;
	(*lab)->lab = lid;
	(*lab)++;
}
int __cpuinit uasm_in_compat_space_p(long addr)
{
	/* Is this address in 32bit compat space? */
	return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
}

int __cpuinit uasm_rel_highest(long val)
{
	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
}

int __cpuinit uasm_rel_higher(long val)
{
	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
}

int __cpuinit uasm_rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

int __cpuinit uasm_rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
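/*
 * Worked example (added, not part of the original source): the helpers
 * above split an address into sign-adjusted 16-bit pieces so that they
 * recombine correctly under sign-extending arithmetic.  For
 * addr == 0x12348000:
 *
 *	uasm_rel_hi(0x12348000)  ==  0x1235	(rounded up by the +0x8000)
 *	uasm_rel_lo(0x12348000)  == -0x8000
 *
 * and (0x1235 << 16) + (-0x8000) == 0x12348000, which is the invariant
 * UASM_i_LA() below relies on when it rebuilds an address from
 * lui/daddiu/addiu pieces.
 */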
void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
	if (!uasm_in_compat_space_p(addr)) {
		uasm_i_lui(buf, rs, uasm_rel_highest(addr));
		if (uasm_rel_higher(addr))
			uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
		if (uasm_rel_hi(addr)) {
			uasm_i_dsll(buf, rs, rs, 16);
			uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr));
			uasm_i_dsll(buf, rs, rs, 16);
		} else
			uasm_i_dsll32(buf, rs, rs, 0);
	} else
		uasm_i_lui(buf, rs, uasm_rel_hi(addr));
}
void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
{
	UASM_i_LA_mostly(buf, rs, addr);
	if (uasm_rel_lo(addr)) {
		if (!uasm_in_compat_space_p(addr))
			uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr));
		else
			uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
	}
}
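/*
 * Usage sketch (added, not part of the original source; the buffer and
 * symbol names are hypothetical):
 *
 *	u32 *p = handler_buf;
 *	UASM_i_LA(&p, 26, (long)&some_kernel_object);
 *
 * For an address in 32-bit compat space this emits a lui/addiu pair
 * into register $26 (k0); for a full 64-bit address it emits the longer
 * lui/daddiu/dsll sequence built by UASM_i_LA_mostly() above.
 */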
/* Handle relocations. */
void __cpuinit
uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
{
	(*rel)->addr = addr;
	(*rel)->type = R_MIPS_PC16;
	(*rel)->lab = lid;
	(*rel)++;
}
static inline void __cpuinit
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
		*rel->addr |= build_bimm(laddr - (raddr + 4));
		break;

	default:
		panic("Unsupported Micro-assembler relocation %d",
		      rel->type);
	}
}
void __cpuinit
uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
	struct uasm_label *l;

	for (; rel->lab != UASM_LABEL_INVALID; rel++)
		for (l = lab; l->lab != UASM_LABEL_INVALID; l++)
			if (rel->lab == l->lab)
				__resolve_relocs(rel, l);
}
void __cpuinit
uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
{
	for (; rel->lab != UASM_LABEL_INVALID; rel++)
		if (rel->addr >= first && rel->addr < end)
			rel->addr += off;
}
void __cpuinit
uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
{
	for (; lab->lab != UASM_LABEL_INVALID; lab++)
		if (lab->addr >= first && lab->addr < end)
			lab->addr += off;
}
void __cpuinit
uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
		  u32 *end, u32 *target)
{
	long off = (long)(target - first);

	memcpy(target, first, (end - first) * sizeof(u32));

	uasm_move_relocs(rel, first, end, off);
	uasm_move_labels(lab, first, end, off);
}
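/*
 * Usage sketch (added, not part of the original source; the buffer and
 * array names are hypothetical): a handler generator typically
 * assembles into a scratch buffer and then moves the result into its
 * final location before patching branches:
 *
 *	uasm_copy_handler(relocs, labels, scratch, p, final_handler);
 *	uasm_resolve_relocs(relocs, labels);
 *
 * Note that "off" is a difference of u32 pointers, so relocation and
 * label addresses are shifted by whole instructions.
 */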
int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
{
	for (; rel->lab != UASM_LABEL_INVALID; rel++) {
		if (rel->addr == addr
		    && (rel->type == R_MIPS_PC16
			|| rel->type == R_MIPS_26))
			return 1;
	}

	return 0;
}
/* Convenience functions for labeled branches. */
void __cpuinit
uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_bltz(p, reg, 0);
}

void __cpuinit
uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_b(p, 0);
}

void __cpuinit
uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_beqz(p, reg, 0);
}

void __cpuinit
uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_beqzl(p, reg, 0);
}

void __cpuinit
uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_bnez(p, reg, 0);
}

void __cpuinit
uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_bgezl(p, reg, 0);
}

void __cpuinit
uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_bgez(p, reg, 0);
}
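/*
 * End-to-end sketch (added, not part of the original source; the label
 * id and array names are hypothetical):
 *
 *	uasm_il_bnez(&p, &r, 1, label_restart);
 *	uasm_i_nop(&p);			(caller fills the delay slot)
 *	...
 *	uasm_build_label(&l, p, label_restart);
 *	uasm_resolve_relocs(relocs, labels);
 *
 * uasm_r_mips_pc16() records where each branch was emitted, and
 * __resolve_relocs() later ORs the PC-relative offset into that word
 * via build_bimm().
 */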