/*
 * Instruction-patching support.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/init.h>
#include <linux/string.h>

#include <asm/paravirt.h>
#include <asm/patch.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/unistd.h>

/*
 * This was adapted from code written by Tony Luck:
 *
 * The 64-bit value in a "movl reg=value" is scattered between the two words of the bundle
 * like this:
 *
 * 6  6         5         4         3         2         1
 * 3210987654321098765432109876543210987654321098765432109876543210
 * ABBBBBBBBBBBBBBBBBBBBBBBCCCCCCCCCCCCCCCCCCDEEEEEFFFFFFFFFGGGGGGG
 *
 * CCCCCCCCCCCCCCCCCCxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
 * xxxxAFFFFFFFFFEEEEEDxGGGGGGGxxxxxxxxxxxxxBBBBBBBBBBBBBBBBBBBBBBB
 */
static u64
get_imm64 (u64 insn_addr)
{
	u64 *p = (u64 *) (insn_addr & -16);	/* mask out slot number */

	return ( (p[1] & 0x0800000000000000UL) <<  4) | /*A*/
		((p[1] & 0x00000000007fffffUL) << 40) | /*B*/
		((p[0] & 0xffffc00000000000UL) >> 24) | /*C*/
		((p[1] & 0x0000100000000000UL) >> 23) | /*D*/
		((p[1] & 0x0003e00000000000UL) >> 29) | /*E*/
		((p[1] & 0x07fc000000000000UL) >> 43) | /*F*/
		((p[1] & 0x000007f000000000UL) >> 36);	/*G*/
}
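
/*
 * Illustrative sketch, not part of the original file: get_imm64() is the
 * inverse of ia64_patch_imm64() (declared in <asm/patch.h>, defined below),
 * so rebasing the immediate of a tagged "movl" bundle is a read-modify-write
 * of the scattered value.  The helper name and its arguments are hypothetical.
 */
static void __maybe_unused
example_rebase_movl (u64 ip, u64 delta)
{
	ia64_patch_imm64(ip, get_imm64(ip) + delta);	/* gather, adjust, re-scatter */
	ia64_fc((void *) ip);				/* flush the patched bundle */
}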

/* Patch instruction with "val" where "mask" has 1 bits. */
void
ia64_patch (u64 insn_addr, u64 mask, u64 val)
{
	u64 m0, m1, v0, v1, b0, b1, *b = (u64 *) (insn_addr & -16);
#	define insn_mask ((1UL << 41) - 1)
	unsigned long shift;

	b0 = b[0]; b1 = b[1];
	shift = 5 + 41 * (insn_addr % 16); /* 5 bits of template, then 3 x 41-bit instructions */
	if (shift >= 64) {
		m1 = mask << (shift - 64);
		v1 = val << (shift - 64);
	} else {
		m0 = mask << shift; m1 = mask >> (64 - shift);
		v0 = val << shift; v1 = val >> (64 - shift);
		b[0] = (b0 & ~m0) | (v0 & m0);
	}
	b[1] = (b1 & ~m1) | (v1 & m1);
}
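
/*
 * Usage sketch, illustrative only: ia64_patch() selects a slot through the
 * low bits of insn_addr.  For example, zeroing the 6-bit qualifying-predicate
 * field (instruction bits 0..5) of the instruction in slot 2 of the bundle
 * containing "ip" makes that instruction unconditional.  The helper below is
 * hypothetical, not part of the original file.
 */
static void __maybe_unused
example_make_unconditional (u64 ip)
{
	ia64_patch((ip & -16) + 2, 0x3fUL, 0);	/* slot 2: qp <- p0 */
	ia64_fc((void *) ip);
}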

void
ia64_patch_imm64 (u64 insn_addr, u64 val)
{
	/* The assembler may generate offset pointing to either slot 1
	   or slot 2 for a long (2-slot) instruction, occupying slots 1
	   and 2.  */
	insn_addr &= -16UL;
	ia64_patch(insn_addr + 2,
		   0x01fffefe000UL, ( ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
				     | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
				     | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
				     | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
				     | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */));
	ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
}

void
ia64_patch_imm60 (u64 insn_addr, u64 val)
{
	/* The assembler may generate offset pointing to either slot 1
	   or slot 2 for a long (2-slot) instruction, occupying slots 1
	   and 2.  */
	insn_addr &= -16UL;
	ia64_patch(insn_addr + 2,
		   0x011ffffe000UL, ( ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
				     | ((val & 0x00000000000fffffUL) << 13) /* bit  0 -> 13 */));
	ia64_patch(insn_addr + 1, 0x1fffffffffcUL, val >> 18);
}
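
/*
 * Usage sketch, illustrative only: the imm60 of a "brl" is a signed,
 * bundle-relative target expressed in 16-byte bundles.  Retargeting a tagged
 * brl at "ip" to "target" therefore looks like the hypothetical helper below;
 * patch_brl_fsys_bubble_down() further down does the same thing, except that
 * it first translates the patch address with ia64_imva().
 */
static void __maybe_unused
example_retarget_brl (u64 ip, u64 target)
{
	ia64_patch_imm60(ip, (target - (ip & -16)) / 16);
	ia64_fc((void *) ip);
}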

/*
 * We need sometimes to load the physical address of a kernel
 * object.  Often we can convert the virtual address to physical
 * at execution time, but sometimes (either for performance reasons
 * or during error recovery) we cannot do this.  Patch the marked
 * bundles to load the physical address.
 */
void __init
ia64_patch_vtop (unsigned long start, unsigned long end)
{
	s32 *offp = (s32 *) start;
	u64 ip;

	while (offp < (s32 *) end) {
		ip = (u64) offp + *offp;

		/* replace virtual address with corresponding physical address: */
		ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
		ia64_fc((void *) ip);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}
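
/*
 * Caller sketch, an assumption rather than part of this file: "start" and
 * "end" bound a patch-list section of self-relative s32 entries, one per
 * tagged bundle.  For the kernel's own vtop list the bounds are the linker
 * symbols declared in <asm/sections.h>, so a caller would look roughly like:
 */
static void __init __maybe_unused
example_patch_kernel_vtop (void)
{
	ia64_patch_vtop((unsigned long) __start___vtop_patchlist,
			(unsigned long) __end___vtop_patchlist);
}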

/*
 * Disable the RSE workaround by turning the conditional branch
 * that we tagged in each place the workaround was used into an
 * unconditional branch.
 */
void __init
ia64_patch_rse (unsigned long start, unsigned long end)
{
	s32 *offp = (s32 *) start;
	u64 ip, *b;

	while (offp < (s32 *) end) {
		ip = (u64) offp + *offp;

		b = (u64 *)(ip & -16);
		b[1] &= ~0xf800000L;	/* turn the tagged branch into an unconditional one */
		ia64_fc((void *) ip);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}

void __init
ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
{
	static int first_time = 1;
	int need_workaround;
	s32 *offp = (s32 *) start;
	u64 *wp;

	need_workaround = (local_cpu_data->family == 0x1f && local_cpu_data->model == 0);

	if (first_time) {
		first_time = 0;
		if (need_workaround)
			printk(KERN_INFO "Leaving McKinley Errata 9 workaround enabled\n");
	}
	if (need_workaround)
		return;

	while (offp < (s32 *) end) {
		wp = (u64 *) ia64_imva((char *) offp + *offp);
		wp[0] = 0x0000000100000011UL; /* nop.m 0; nop.i 0; br.ret.sptk.many b6 */
		wp[1] = 0x0084006880000200UL;
		wp[2] = 0x0000000100000000UL; /* nop.m 0; nop.i 0; nop.i 0 */
		wp[3] = 0x0004000000000200UL;
		ia64_fc(wp); ia64_fc(wp + 2);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}

extern unsigned long ia64_native_fsyscall_table[NR_syscalls];
extern char ia64_native_fsys_bubble_down[];
struct pv_fsys_data pv_fsys_data __initdata = {
	.fsyscall_table = (unsigned long *)ia64_native_fsyscall_table,
	.fsys_bubble_down = (void *)ia64_native_fsys_bubble_down,
};

unsigned long * __init
paravirt_get_fsyscall_table(void)
{
	return pv_fsys_data.fsyscall_table;
}

char * __init
paravirt_get_fsys_bubble_down(void)
{
	return pv_fsys_data.fsys_bubble_down;
}

static void __init
patch_fsyscall_table (unsigned long start, unsigned long end)
{
	u64 fsyscall_table = (u64)paravirt_get_fsyscall_table();
	s32 *offp = (s32 *) start;
	u64 ip;

	while (offp < (s32 *) end) {
		ip = (u64) ia64_imva((char *) offp + *offp);
		ia64_patch_imm64(ip, fsyscall_table);
		ia64_fc((void *) ip);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}

static void __init
patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
{
	u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down();
	s32 *offp = (s32 *) start;
	u64 ip;

	while (offp < (s32 *) end) {
		ip = (u64) offp + *offp;
		ia64_patch_imm60((u64) ia64_imva((void *) ip),
				 (u64) (fsys_bubble_down - (ip & -16)) / 16);
		ia64_fc((void *) ip);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}

void __init
ia64_patch_gate (void)
{
#	define START(name)	paravirt_get_gate_patchlist(PV_GATE_START_##name)
#	define END(name)	paravirt_get_gate_patchlist(PV_GATE_END_##name)

	patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL));
	patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN));
	ia64_patch_vtop(START(VTOP), END(VTOP));
	ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9));
}

void ia64_patch_phys_stack_reg(unsigned long val)
{
	s32 * offp = (s32 *) __start___phys_stack_reg_patchlist;
	s32 * end = (s32 *) __end___phys_stack_reg_patchlist;
	u64 ip, mask, imm;

	/* see instruction format A4: adds r1 = imm13, r3 */
	mask = (0x3fUL << 27) | (0x7f << 13);
	imm = (((val >> 7) & 0x3f) << 27) | (val & 0x7f) << 13;

	while (offp < end) {
		ip = (u64) offp + *offp;
		ia64_patch(ip, mask, imm);
		ia64_fc((void *) ip);
		++offp;
	}
	ia64_sync_i();
	ia64_srlz_i();
}
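
/*
 * Caller sketch, an assumption rather than part of this file: "val" is the
 * size in bytes of the physical stacked-register partition that the tagged
 * "adds" should load; cpu-init style code might pass something like
 * num_phys_stacked * 8 + 8.  The helper below is hypothetical.
 */
static void __maybe_unused
example_patch_phys_stack_reg (unsigned long num_phys_stacked)
{
	ia64_patch_phys_stack_reg(num_phys_stacked * 8 + 8);
}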