/* $Id: unaligned.c,v 1.18 1999/08/02 08:39:44 davem Exp $
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/bitops.h>
23 /* #define DEBUG_MNA */
/* Kind of memory access performed by the faulting instruction. */
enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpload,
	fpstore,
	invalid
};
/* Human-readable names for enum direction, used by the DEBUG_MNA printk. */
static char *dirstrings[] = {
	"load", "store", "both", "fpload", "fpstore", "invalid"
};
40 static inline enum direction
decode_direction(unsigned int insn
)
42 unsigned long tmp
= (insn
>> 21) & 1;
47 switch ((insn
>>19)&0xf) {
56 /* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
57 static inline int decode_access_size(unsigned int insn
)
61 tmp
= ((insn
>> 19) & 0xf);
62 if (tmp
== 11 || tmp
== 14) /* ldx/stx */
68 return 16; /* ldd/std - Although it is actually 8 */
72 printk("Impossible unaligned trap. insn=%08x\n", insn
);
73 die_if_kernel("Byte sized unaligned access?!?!", current
->thread
.kregs
);
77 static inline int decode_asi(unsigned int insn
, struct pt_regs
*regs
)
79 if (insn
& 0x800000) {
81 return (unsigned char)(regs
->tstate
>> 24); /* %asi */
83 return (unsigned char)(insn
>> 5); /* imm_asi */
/* 0x400000 = signed, 0 = unsigned.  Returns the raw bit (non-zero means
 * the load sign-extends), not a normalized boolean.
 */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}
/* If any register the instruction names lives in a register window
 * (%l0-%l7/%i0-%i7, i.e. reg >= 16), the window must be spilled to the
 * stack before we read/write it through memory: flushw for kernel
 * traps, the user spill path otherwise.
 */
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
					unsigned int rd, int from_kernel)
{
	if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if(from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}
/* Sign-extend the low 13-bit immediate field of an instruction word.
 *
 * The left shift is done in unsigned arithmetic: `imm << 51` on a
 * signed long is undefined behavior whenever bit 12 is set.  The final
 * arithmetic right shift replicates bit 12 across the upper bits.
 */
static inline long sign_extend_imm13(long imm)
{
	return (long)((unsigned long)imm << 51) >> 51;
}
110 static unsigned long fetch_reg(unsigned int reg
, struct pt_regs
*regs
)
115 return (!reg
? 0 : regs
->u_regs
[reg
]);
116 if (regs
->tstate
& TSTATE_PRIV
) {
117 struct reg_window
*win
;
118 win
= (struct reg_window
*)(regs
->u_regs
[UREG_FP
] + STACK_BIAS
);
119 value
= win
->locals
[reg
- 16];
120 } else if (current
->thread
.flags
& SPARC_FLAG_32BIT
) {
121 struct reg_window32
*win32
;
122 win32
= (struct reg_window32
*)((unsigned long)((u32
)regs
->u_regs
[UREG_FP
]));
123 get_user(value
, &win32
->locals
[reg
- 16]);
125 struct reg_window
*win
;
126 win
= (struct reg_window
*)(regs
->u_regs
[UREG_FP
] + STACK_BIAS
);
127 get_user(value
, &win
->locals
[reg
- 16]);
132 static unsigned long *fetch_reg_addr(unsigned int reg
, struct pt_regs
*regs
)
135 return ®s
->u_regs
[reg
];
136 if (regs
->tstate
& TSTATE_PRIV
) {
137 struct reg_window
*win
;
138 win
= (struct reg_window
*)(regs
->u_regs
[UREG_FP
] + STACK_BIAS
);
139 return &win
->locals
[reg
- 16];
140 } else if (current
->thread
.flags
& SPARC_FLAG_32BIT
) {
141 struct reg_window32
*win32
;
142 win32
= (struct reg_window32
*)((unsigned long)((u32
)regs
->u_regs
[UREG_FP
]));
143 return (unsigned long *)&win32
->locals
[reg
- 16];
145 struct reg_window
*win
;
146 win
= (struct reg_window
*)(regs
->u_regs
[UREG_FP
] + STACK_BIAS
);
147 return &win
->locals
[reg
- 16];
151 static inline unsigned long compute_effective_address(struct pt_regs
*regs
,
152 unsigned int insn
, unsigned int rd
)
154 unsigned int rs1
= (insn
>> 14) & 0x1f;
155 unsigned int rs2
= insn
& 0x1f;
156 int from_kernel
= (regs
->tstate
& TSTATE_PRIV
) != 0;
159 maybe_flush_windows(rs1
, 0, rd
, from_kernel
);
160 return (fetch_reg(rs1
, regs
) + sign_extend_imm13(insn
));
162 maybe_flush_windows(rs1
, rs2
, rd
, from_kernel
);
163 return (fetch_reg(rs1
, regs
) + fetch_reg(rs2
, regs
));
/* This is just to make gcc think die_if_kernel does return... */
static void unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}
/* Emulate an unaligned integer load of `size' bytes (2, 4, or 8/16)
 * from `saddr' using byte loads through ASI `asi', assembling the
 * result and writing it (as one or, for size 16, two 64-bit words)
 * to the register slot `dest_reg'.  `is_signed' selects sign
 * extension for the 2- and 4-byte cases.  Each byte load is entered in
 * __ex_table so a fault branches to `errh'.  The trailing wr restores
 * %asi to ASI_AIUS for the rest of the trap path.
 */
#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({	\
__asm__ __volatile__ (							\
	"wr	%4, 0, %%asi\n\t"					\
	"cmp	%1, 8\n\t"						\
	"bge,pn	%%icc, 9f\n\t"						\
	" cmp	%1, 4\n\t"						\
	"be,pt	%%icc, 6f\n"						\
"4:\t"	" lduba	[%2] %%asi, %%l1\n"					\
"5:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t"				\
	"sll	%%l1, 8, %%l1\n\t"					\
	"brz,pt	%3, 3f\n\t"						\
	" add	%%l1, %%l2, %%l1\n\t"					\
	"sllx	%%l1, 48, %%l1\n\t"					\
	"srax	%%l1, 48, %%l1\n"					\
"3:\t"	"ba,pt	%%xcc, 0f\n\t"						\
	" stx	%%l1, [%0]\n"						\
"6:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t"				\
	"sll	%%l1, 24, %%l1\n"					\
"7:\t"	"lduba	[%2 + 2] %%asi, %%g7\n\t"				\
	"sll	%%l2, 16, %%l2\n"					\
"8:\t"	"lduba	[%2 + 3] %%asi, %%g1\n\t"				\
	"sll	%%g7, 8, %%g7\n\t"					\
	"or	%%l1, %%l2, %%l1\n\t"					\
	"or	%%g7, %%g1, %%g7\n\t"					\
	"or	%%l1, %%g7, %%l1\n\t"					\
	"brnz,a,pt %3, 3f\n\t"						\
	" sra	%%l1, 0, %%l1\n"					\
"3:\t"	"ba,pt	%%xcc, 0f\n\t"						\
	" stx	%%l1, [%0]\n"						\
"9:\t"	"lduba	[%2] %%asi, %%l1\n"					\
"10:\t"	"lduba	[%2 + 1] %%asi, %%l2\n\t"				\
	"sllx	%%l1, 56, %%l1\n"					\
"11:\t"	"lduba	[%2 + 2] %%asi, %%g7\n\t"				\
	"sllx	%%l2, 48, %%l2\n"					\
"12:\t"	"lduba	[%2 + 3] %%asi, %%g1\n\t"				\
	"sllx	%%g7, 40, %%g7\n\t"					\
	"sllx	%%g1, 32, %%g1\n\t"					\
	"or	%%l1, %%l2, %%l1\n\t"					\
	"or	%%g7, %%g1, %%g7\n"					\
"13:\t"	"lduba	[%2 + 4] %%asi, %%l2\n\t"				\
	"or	%%l1, %%g7, %%g7\n"					\
"14:\t"	"lduba	[%2 + 5] %%asi, %%g1\n\t"				\
	"sllx	%%l2, 24, %%l2\n"					\
"15:\t"	"lduba	[%2 + 6] %%asi, %%l1\n\t"				\
	"sllx	%%g1, 16, %%g1\n\t"					\
	"or	%%g7, %%l2, %%g7\n"					\
"16:\t"	"lduba	[%2 + 7] %%asi, %%l2\n\t"				\
	"sllx	%%l1, 8, %%l1\n\t"					\
	"or	%%g7, %%g1, %%g7\n\t"					\
	"or	%%l1, %%l2, %%l1\n\t"					\
	"or	%%g7, %%l1, %%g7\n\t"					\
	"cmp	%1, 8\n\t"						\
	"be,a,pt %%icc, 0f\n\t"						\
	" stx	%%g7, [%0]\n\t"						\
	"srlx	%%g7, 32, %%l1\n\t"					\
	"sra	%%g7, 0, %%g7\n\t"					\
	"stx	%%l1, [%0]\n\t"						\
	"stx	%%g7, [%0 + 8]\n"					\
"0:\n\t"								\
	"wr	%%g0, %5, %%asi\n\n\t"					\
	".section __ex_table\n\t"					\
	".word	4b, " #errh "\n\t"					\
	".word	5b, " #errh "\n\t"					\
	".word	6b, " #errh "\n\t"					\
	".word	7b, " #errh "\n\t"					\
	".word	8b, " #errh "\n\t"					\
	".word	9b, " #errh "\n\t"					\
	".word	10b, " #errh "\n\t"					\
	".word	11b, " #errh "\n\t"					\
	".word	12b, " #errh "\n\t"					\
	".word	13b, " #errh "\n\t"					\
	".word	14b, " #errh "\n\t"					\
	".word	15b, " #errh "\n\t"					\
	".word	16b, " #errh "\n\n\t"					\
	".previous\n\t"							\
	: : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed),	\
	    "r" (asi), "i" (ASI_AIUS)					\
	: "l1", "l2", "g7", "g1", "cc");				\
})
/* Emulate an unaligned integer store: load the 64-bit source value
 * from `src_val', then write `size' bytes (2, 4, or 8) of it to
 * `dst_addr' one byte at a time through ASI `asi'.  Each byte store is
 * covered by an __ex_table entry branching to `errh' on fault; %asi is
 * restored to ASI_AIUS afterwards.
 */
#define store_common(dst_addr, size, src_val, asi, errh) ({		\
__asm__ __volatile__ (							\
	"wr	%3, 0, %%asi\n\t"					\
	"ldx	[%2], %%l1\n"						\
	"cmp	%1, 2\n\t"						\
	"be,pn	%%icc, 2f\n\t"						\
	" cmp	%1, 4\n\t"						\
	"be,pt	%%icc, 1f\n\t"						\
	" srlx	%%l1, 24, %%l2\n\t"					\
	"srlx	%%l1, 56, %%g1\n\t"					\
	"srlx	%%l1, 48, %%g7\n"					\
"4:\t"	"stba	%%g1, [%0] %%asi\n\t"					\
	"srlx	%%l1, 40, %%g1\n"					\
"5:\t"	"stba	%%g7, [%0 + 1] %%asi\n\t"				\
	"srlx	%%l1, 32, %%g7\n"					\
"6:\t"	"stba	%%g1, [%0 + 2] %%asi\n"					\
"7:\t"	"stba	%%g7, [%0 + 3] %%asi\n\t"				\
	"srlx	%%l1, 16, %%g1\n"					\
"8:\t"	"stba	%%l2, [%0 + 4] %%asi\n\t"				\
	"srlx	%%l1, 8, %%g7\n"					\
"9:\t"	"stba	%%g1, [%0 + 5] %%asi\n"					\
"10:\t"	"stba	%%g7, [%0 + 6] %%asi\n\t"				\
	"ba,pt	%%xcc, 0f\n"						\
"11:\t"	" stba	%%l1, [%0 + 7] %%asi\n"					\
"1:\t"	"srl	%%l1, 16, %%g7\n"					\
"12:\t"	"stba	%%l2, [%0] %%asi\n\t"					\
	"srl	%%l1, 8, %%l2\n"					\
"13:\t"	"stba	%%g7, [%0 + 1] %%asi\n"					\
"14:\t"	"stba	%%l2, [%0 + 2] %%asi\n\t"				\
	"ba,pt	%%xcc, 0f\n"						\
"15:\t"	" stba	%%l1, [%0 + 3] %%asi\n"					\
"2:\t"	"srl	%%l1, 8, %%l2\n"					\
"16:\t"	"stba	%%l2, [%0] %%asi\n"					\
"17:\t"	"stba	%%l1, [%0 + 1] %%asi\n"					\
"0:\n\t"								\
	"wr	%%g0, %4, %%asi\n\n\t"					\
	".section __ex_table\n\t"					\
	".word	4b, " #errh "\n\t"					\
	".word	5b, " #errh "\n\t"					\
	".word	6b, " #errh "\n\t"					\
	".word	7b, " #errh "\n\t"					\
	".word	8b, " #errh "\n\t"					\
	".word	9b, " #errh "\n\t"					\
	".word	10b, " #errh "\n\t"					\
	".word	11b, " #errh "\n\t"					\
	".word	12b, " #errh "\n\t"					\
	".word	13b, " #errh "\n\t"					\
	".word	14b, " #errh "\n\t"					\
	".word	15b, " #errh "\n\t"					\
	".word	16b, " #errh "\n\t"					\
	".word	17b, " #errh "\n\n\t"					\
	".previous\n\t"							\
	: : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS)\
	: "l1", "l2", "g7", "g1", "cc");				\
})
/* Store helper: materialize the source value for the store.  An std
 * (size 16) packs two adjacent 32-bit registers into one 64-bit value
 * (and is actually an 8-byte store); otherwise we point directly at
 * the 64-bit register slot, with %g0 reading as zero.
 */
#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({	\
	unsigned long zero = 0;						\
	unsigned long *src_val = &zero;					\
									\
	if (size == 16) {						\
		size = 8;						\
		zero = (((long)(reg_num ?				\
		        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \
			(unsigned)fetch_reg(reg_num + 1, regs);		\
	} else if (reg_num) src_val = fetch_reg_addr(reg_num, regs);	\
	store_common(dst_addr, size, src_val, asi, errh);		\
})
/* XXX Need to capture/release other cpu's for SMP around this. */
/* NOTE(review): this macro is only referenced from an "#if 0"
 * (unsupported) branch below, and it passes five arguments to the
 * six-parameter do_integer_load() (no asi argument) — it would not
 * compile if enabled.  Left as-is since it is dead code; fix the
 * argument lists before resurrecting it.
 */
#define do_atomic(srcdest_reg, mem, errh) ({				\
	unsigned long flags, tmp;					\
									\
	save_and_cli(flags);						\
	tmp = *srcdest_reg;						\
	do_integer_load(srcdest_reg, 4, mem, 0, errh);			\
	store_common(mem, 4, &tmp, errh);				\
	restore_flags(flags);						\
})
333 static inline void advance(struct pt_regs
*regs
)
335 regs
->tpc
= regs
->tnpc
;
/* Bit 24 of a load/store instruction word selects the FP variant. */
static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}
/* The kernel fixup path only handles integer loads/stores; FP
 * accesses must not reach the emulation below.
 */
static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}
349 void kernel_mna_trap_fault(struct pt_regs
*regs
, unsigned int insn
) __asm__ ("kernel_mna_trap_fault");
351 void kernel_mna_trap_fault(struct pt_regs
*regs
, unsigned int insn
)
353 unsigned long g2
= regs
->u_regs
[UREG_G2
];
354 unsigned long fixup
= search_exception_table (regs
->tpc
, &g2
);
357 unsigned long address
= compute_effective_address(regs
, insn
, ((insn
>> 25) & 0x1f));
358 if(address
< PAGE_SIZE
) {
359 printk(KERN_ALERT
"Unable to handle kernel NULL pointer dereference in mna handler");
361 printk(KERN_ALERT
"Unable to handle kernel paging request in mna handler");
362 printk(KERN_ALERT
" at virtual address %016lx\n",address
);
363 printk(KERN_ALERT
"current->mm->context = %016lx\n",
364 (unsigned long) current
->mm
->context
);
365 printk(KERN_ALERT
"current->mm->pgd = %016lx\n",
366 (unsigned long) current
->mm
->pgd
);
367 die_if_kernel("Oops", regs
);
371 regs
->tnpc
= regs
->tpc
+ 4;
372 regs
->u_regs
[UREG_G2
] = g2
;
375 asmlinkage
void kernel_unaligned_trap(struct pt_regs
*regs
, unsigned int insn
, unsigned long sfar
, unsigned long sfsr
)
377 enum direction dir
= decode_direction(insn
);
378 int size
= decode_access_size(insn
);
380 if(!ok_for_kernel(insn
) || dir
== both
) {
381 printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
383 unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs
);
385 __asm__
__volatile__ ("\n"
386 "kernel_unaligned_trap_fault:\n\t"
388 "call kernel_mna_trap_fault\n\t"
391 : "r" (regs
), "r" (insn
)
392 : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
393 "g1", "g2", "g3", "g4", "g5", "g7", "cc");
395 unsigned long addr
= compute_effective_address(regs
, insn
, ((insn
>> 25) & 0x1f));
398 printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
399 regs
->tpc
, dirstrings
[dir
], addr
, size
, regs
->u_regs
[UREG_RETPC
]);
403 do_integer_load(fetch_reg_addr(((insn
>>25)&0x1f), regs
),
404 size
, (unsigned long *) addr
,
405 decode_signedness(insn
), decode_asi(insn
, regs
),
406 kernel_unaligned_trap_fault
);
410 do_integer_store(((insn
>>25)&0x1f), size
,
411 (unsigned long *) addr
, regs
,
412 decode_asi(insn
, regs
),
413 kernel_unaligned_trap_fault
);
415 #if 0 /* unsupported */
417 do_atomic(fetch_reg_addr(((insn
>>25)&0x1f), regs
),
418 (unsigned long *) addr
,
419 kernel_unaligned_trap_fault
);
423 panic("Impossible kernel unaligned trap.");
/* Population count of each 4-bit value; handle_popc folds a 64-bit
 * word through this table one nibble at a time.
 */
static char popc_helper[] = {
	0, 1, 1, 2, 1, 2, 2, 3,
	1, 2, 2, 3, 2, 3, 3, 4,
};
435 int handle_popc(u32 insn
, struct pt_regs
*regs
)
438 int ret
, i
, rd
= ((insn
>> 25) & 0x1f);
439 int from_kernel
= (regs
->tstate
& TSTATE_PRIV
) != 0;
442 maybe_flush_windows(0, 0, rd
, from_kernel
);
443 value
= sign_extend_imm13(insn
);
445 maybe_flush_windows(0, insn
& 0x1f, rd
, from_kernel
);
446 value
= fetch_reg(insn
& 0x1f, regs
);
448 for (ret
= 0, i
= 0; i
< 16; i
++) {
449 ret
+= popc_helper
[value
& 0xf];
454 regs
->u_regs
[rd
] = ret
;
456 if (current
->thread
.flags
& SPARC_FLAG_32BIT
) {
457 struct reg_window32
*win32
;
458 win32
= (struct reg_window32
*)((unsigned long)((u32
)regs
->u_regs
[UREG_FP
]));
459 put_user(ret
, &win32
->locals
[rd
- 16]);
461 struct reg_window
*win
;
462 win
= (struct reg_window
*)(regs
->u_regs
[UREG_FP
] + STACK_BIAS
);
463 put_user(ret
, &win
->locals
[rd
- 16]);
/* Trap dispatch helpers defined elsewhere in the sparc64 trap code. */
extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void data_access_exception(struct pt_regs *regs);
474 int handle_ldf_stq(u32 insn
, struct pt_regs
*regs
)
476 unsigned long addr
= compute_effective_address(regs
, insn
, 0);
477 int freg
= ((insn
>> 25) & 0x1e) | ((insn
>> 20) & 0x20);
478 struct fpustate
*f
= FPUSTATE
;
479 int asi
= decode_asi(insn
, regs
);
480 int flag
= (freg
< 32) ? FPRS_DL
: FPRS_DU
;
482 save_and_clear_fpu();
483 current
->thread
.xfsr
[0] &= ~0x1c000;
485 current
->thread
.xfsr
[0] |= (6 << 14) /* invalid_fp_register */;
489 if (insn
& 0x200000) {
491 u64 first
= 0, second
= 0;
493 if (current
->thread
.fpsaved
[0] & flag
) {
494 first
= *(u64
*)&f
->regs
[freg
];
495 second
= *(u64
*)&f
->regs
[freg
+2];
507 /* Need to convert endians */
508 u64 tmp
= __swab64p(&first
);
510 first
= __swab64p(&second
);
515 data_access_exception(regs
);
518 if (put_user (first
>> 32, (u32
*)addr
) ||
519 __put_user ((u32
)first
, (u32
*)(addr
+ 4)) ||
520 __put_user (second
>> 32, (u32
*)(addr
+ 8)) ||
521 __put_user ((u32
)second
, (u32
*)(addr
+ 12))) {
522 data_access_exception(regs
);
526 /* LDF, LDDF, LDQF */
527 u32 data
[4] __attribute__ ((aligned(8)));
534 } else if (asi
> ASI_SNFL
) {
535 data_access_exception(regs
);
538 switch (insn
& 0x180000) {
539 case 0x000000: size
= 1; break;
540 case 0x100000: size
= 4; break;
541 default: size
= 2; break;
543 for (i
= 0; i
< size
; i
++)
546 err
= get_user (data
[0], (u32
*)addr
);
548 for (i
= 1; i
< size
; i
++)
549 err
|= __get_user (data
[i
], (u32
*)(addr
+ 4*i
));
551 if (err
&& !(asi
& 0x2 /* NF */)) {
552 data_access_exception(regs
);
555 if (asi
& 0x8) /* Little */ {
559 case 1: data
[0] = le32_to_cpup(data
+ 0); break;
560 default:*(u64
*)(data
+ 0) = le64_to_cpup((u64
*)(data
+ 0));
562 case 4: tmp
= le64_to_cpup((u64
*)(data
+ 0));
563 *(u64
*)(data
+ 0) = le64_to_cpup((u64
*)(data
+ 2));
564 *(u64
*)(data
+ 2) = tmp
;
568 if (!(current
->thread
.fpsaved
[0] & FPRS_FEF
)) {
569 current
->thread
.fpsaved
[0] = FPRS_FEF
;
570 current
->thread
.gsr
[0] = 0;
572 if (!(current
->thread
.fpsaved
[0] & flag
)) {
574 memset(f
->regs
, 0, 32*sizeof(u32
));
576 memset(f
->regs
+32, 0, 32*sizeof(u32
));
578 memcpy(f
->regs
+ freg
, data
, size
* 4);
579 current
->thread
.fpsaved
[0] |= flag
;
585 void handle_ld_nf(u32 insn
, struct pt_regs
*regs
)
587 int rd
= ((insn
>> 25) & 0x1f);
588 int from_kernel
= (regs
->tstate
& TSTATE_PRIV
) != 0;
591 maybe_flush_windows(0, 0, rd
, from_kernel
);
592 reg
= fetch_reg_addr(rd
, regs
);
593 if ((insn
& 0x780000) == 0x180000)
599 void handle_lddfmna(struct pt_regs
*regs
, unsigned long sfar
, unsigned long sfsr
)
601 unsigned long pc
= regs
->tpc
;
602 unsigned long tstate
= regs
->tstate
;
608 struct fpustate
*f
= FPUSTATE
;
610 if(tstate
& TSTATE_PRIV
)
611 die_if_kernel("lddfmna from kernel", regs
);
612 if(current
->thread
.flags
& SPARC_FLAG_32BIT
)
614 if (get_user(insn
, (u32
*)pc
) != -EFAULT
) {
616 if ((asi
> ASI_SNFL
) ||
619 if (get_user(first
, (u32
*)sfar
) ||
620 get_user(second
, (u32
*)(sfar
+ 4))) {
621 if (asi
& 0x2) /* NF */ {
622 first
= 0; second
= 0;
626 save_and_clear_fpu();
627 freg
= ((insn
>> 25) & 0x1e) | ((insn
>> 20) & 0x20);
628 value
= (((u64
)first
) << 32) | second
;
629 if (asi
& 0x8) /* Little */
630 value
= __swab64p(&value
);
631 flag
= (freg
< 32) ? FPRS_DL
: FPRS_DU
;
632 if (!(current
->thread
.fpsaved
[0] & FPRS_FEF
)) {
633 current
->thread
.fpsaved
[0] = FPRS_FEF
;
634 current
->thread
.gsr
[0] = 0;
636 if (!(current
->thread
.fpsaved
[0] & flag
)) {
638 memset(f
->regs
, 0, 32*sizeof(u32
));
640 memset(f
->regs
+32, 0, 32*sizeof(u32
));
642 *(u64
*)(f
->regs
+ freg
) = value
;
643 current
->thread
.fpsaved
[0] |= flag
;
645 daex
: data_access_exception(regs
);
652 void handle_stdfmna(struct pt_regs
*regs
, unsigned long sfar
, unsigned long sfsr
)
654 unsigned long pc
= regs
->tpc
;
655 unsigned long tstate
= regs
->tstate
;
660 struct fpustate
*f
= FPUSTATE
;
662 if(tstate
& TSTATE_PRIV
)
663 die_if_kernel("stdfmna from kernel", regs
);
664 if(current
->thread
.flags
& SPARC_FLAG_32BIT
)
666 if (get_user(insn
, (u32
*)pc
) != -EFAULT
) {
667 freg
= ((insn
>> 25) & 0x1e) | ((insn
>> 20) & 0x20);
670 flag
= (freg
< 32) ? FPRS_DL
: FPRS_DU
;
671 if ((asi
> ASI_SNFL
) ||
674 save_and_clear_fpu();
675 if (current
->thread
.fpsaved
[0] & flag
)
676 value
= *(u64
*)&f
->regs
[freg
];
682 value
= __swab64p(&value
); break;
685 if (put_user (value
>> 32, (u32
*)sfar
) ||
686 __put_user ((u32
)value
, (u32
*)(sfar
+ 4)))
689 daex
: data_access_exception(regs
);