/* $Id: unaligned.c,v 1.18 1999/08/02 08:39:44 davem Exp $
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/bitops.h>

/* #define DEBUG_MNA */

enum direction {
        load,    /* ld, ldd, ldh, ldsh */
        store,   /* st, std, sth, stsh */
        both,    /* Swap, ldstub, cas, ... */
        fpld,
        fpst,
        invalid,
};

#ifdef DEBUG_MNA
static char *dirstrings[] = {
        "load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif

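/*
 * Instruction decode helpers.  In the v9 load/store formats the op3
 * opcode field occupies bits 24:19; bit 21 (op3 bit 2) is set for the
 * store flavors, and an op3 low nibble of 15 (the swap forms) marks the
 * read-modify-write instructions we cannot emulate here.
 */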
static inline enum direction decode_direction(unsigned int insn)
{
        unsigned long tmp = (insn >> 21) & 1;

        if(!tmp)
                return load;
        else {
                switch ((insn>>19)&0xf) {
                case 15: /* swap* */
                        return both;
                default:
                        return store;
                }
        }
}

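/*
 * Worked example: ldx has op3 0x0b, so decode_access_size() returns 8
 * directly; lduh has op3 0x02, whose low two bits give tmp == 2 and a
 * 2-byte access.  ldd/std report 16 so the caller can tell the
 * register-pair case apart, even though only 8 bytes actually move.
 */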
/* 16 = double-word, 8 = extended-word, 4 = word, 2 = half-word */
static inline int decode_access_size(unsigned int insn)
{
        unsigned int tmp;

        tmp = ((insn >> 19) & 0xf);
        if (tmp == 11 || tmp == 14) /* ldx/stx */
                return 8;
        tmp &= 3;
        if(!tmp)
                return 4;
        else if(tmp == 3)
                return 16;      /* ldd/std - Although it is actually 8 */
        else if(tmp == 2)
                return 2;
        else {
                printk("Impossible unaligned trap. insn=%08x\n", insn);
                die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
                return 0;       /* not reached; placates the compiler */
        }
}

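/*
 * Every v9 memory access goes through an Address Space Identifier.  For
 * the alternate-space forms (bit 23 set) the ASI comes either from the
 * %asi register, kept in bits 31:24 of TSTATE, when the i bit (0x2000)
 * is set, or from the immediate field in bits 12:5 of the instruction.
 * Everything else uses the default primary space, ASI_P.
 */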
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
        if (insn & 0x800000) {
                if (insn & 0x2000)
                        return (unsigned char)(regs->tstate >> 24);     /* %asi */
                else
                        return (unsigned char)(insn >> 5);              /* imm_asi */
        } else
                return ASI_P;
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
        return (insn & 0x400000);
}

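/*
 * Registers %l0-%l7 and %i0-%i7 (numbers 16-31) live in the current
 * register window, which may still be on-chip.  Before we read or write
 * their stack save area we must flush the windows to memory: directly
 * with flushw for kernel faults, via flushw_user() otherwise.
 */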
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
                                       unsigned int rd, int from_kernel)
{
        if(rs2 >= 16 || rs1 >= 16 || rd >= 16) {
                if(from_kernel != 0)
                        __asm__ __volatile__("flushw");
                else
                        flushw_user();
        }
}

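/*
 * A 13-bit immediate occupies bits 12:0.  Shifting it up by 51 puts its
 * sign bit (bit 12) into bit 63 of the 64-bit long; the arithmetic
 * shift back down replicates that bit.  E.g. 0x1fff becomes -1, while
 * 0x0fff stays 0x0fff.
 */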
static inline long sign_extend_imm13(long imm)
{
        return imm << 51 >> 51;
}

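/*
 * %g0-%g7 and %o0-%o7 (numbers 0-15) are snapshotted in pt_regs, with
 * %g0 hardwired to zero.  Higher register numbers must be read from the
 * register window spilled to the stack: biased 64-bit frames for
 * privileged or 64-bit userland, 32-bit frames for 32-bit tasks.
 */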
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
        unsigned long value;

        if(reg < 16)
                return (!reg ? 0 : regs->u_regs[reg]);
        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *win;
                win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
                value = win->locals[reg - 16];
        } else if (current->thread.flags & SPARC_FLAG_32BIT) {
                struct reg_window32 *win32;
                win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
                get_user(value, &win32->locals[reg - 16]);
        } else {
                struct reg_window *win;
                win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
                get_user(value, &win->locals[reg - 16]);
        }
        return value;
}

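/* Same lookup as fetch_reg(), but yielding the register's address so
 * the caller can write the emulated result back. */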
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
        if(reg < 16)
                return &regs->u_regs[reg];
        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *win;
                win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
                return &win->locals[reg - 16];
        } else if (current->thread.flags & SPARC_FLAG_32BIT) {
                struct reg_window32 *win32;
                win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
                return (unsigned long *)&win32->locals[reg - 16];
        } else {
                struct reg_window *win;
                win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
                return &win->locals[reg - 16];
        }
}

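/*
 * The effective address is rs1 + simm13 when the i bit (0x2000) is set,
 * or rs1 + rs2 for the register-register form, exactly as the hardware
 * would have computed it had the access not trapped.
 */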
static inline unsigned long compute_effective_address(struct pt_regs *regs,
                                                      unsigned int insn, unsigned int rd)
{
        unsigned int rs1 = (insn >> 14) & 0x1f;
        unsigned int rs2 = insn & 0x1f;
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

        if(insn & 0x2000) {
                maybe_flush_windows(rs1, 0, rd, from_kernel);
                return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
        } else {
                maybe_flush_windows(rs1, rs2, rd, from_kernel);
                return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
        }
}

/* This is just to make gcc think die_if_kernel does return... */
static void unaligned_panic(char *str, struct pt_regs *regs)
{
        die_if_kernel(str, regs);
}

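/*
 * Emulate an unaligned integer load of `size' bytes from saddr into the
 * slot pointed to by dest_reg.  The access is done one byte at a time
 * with lduba through the requested ASI, the bytes are shifted and or'd
 * together big-endian style (e.g. a 4-byte load of 12 34 56 78 yields
 * 0x12345678), and half/word results are sign-extended when is_signed
 * asks for it.  Each lduba carries a numeric local label that the
 * __ex_table entries map to errh, so a fault mid-sequence lands in the
 * caller's fixup code.  16-byte (ldd) requests leave the value as two
 * 64-bit halves at dest_reg[0] and dest_reg[1].
 */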
#define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \
__asm__ __volatile__ ( \
        "wr %4, 0, %%asi\n\t" \
        "cmp %1, 8\n\t" \
        "bge,pn %%icc, 9f\n\t" \
        " cmp %1, 4\n\t" \
        "be,pt %%icc, 6f\n" \
"4:\t"  " lduba [%2] %%asi, %%l1\n" \
"5:\t"  "lduba [%2 + 1] %%asi, %%l2\n\t" \
        "sll %%l1, 8, %%l1\n\t" \
        "brz,pt %3, 3f\n\t" \
        " add %%l1, %%l2, %%l1\n\t" \
        "sllx %%l1, 48, %%l1\n\t" \
        "srax %%l1, 48, %%l1\n" \
"3:\t"  "ba,pt %%xcc, 0f\n\t" \
        " stx %%l1, [%0]\n" \
"6:\t"  "lduba [%2 + 1] %%asi, %%l2\n\t" \
        "sll %%l1, 24, %%l1\n" \
"7:\t"  "lduba [%2 + 2] %%asi, %%g7\n\t" \
        "sll %%l2, 16, %%l2\n" \
"8:\t"  "lduba [%2 + 3] %%asi, %%g1\n\t" \
        "sll %%g7, 8, %%g7\n\t" \
        "or %%l1, %%l2, %%l1\n\t" \
        "or %%g7, %%g1, %%g7\n\t" \
        "or %%l1, %%g7, %%l1\n\t" \
        "brnz,a,pt %3, 3f\n\t" \
        " sra %%l1, 0, %%l1\n" \
"3:\t"  "ba,pt %%xcc, 0f\n\t" \
        " stx %%l1, [%0]\n" \
"9:\t"  "lduba [%2] %%asi, %%l1\n" \
"10:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \
        "sllx %%l1, 56, %%l1\n" \
"11:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \
        "sllx %%l2, 48, %%l2\n" \
"12:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \
        "sllx %%g7, 40, %%g7\n\t" \
        "sllx %%g1, 32, %%g1\n\t" \
        "or %%l1, %%l2, %%l1\n\t" \
        "or %%g7, %%g1, %%g7\n" \
"13:\t" "lduba [%2 + 4] %%asi, %%l2\n\t" \
        "or %%l1, %%g7, %%g7\n" \
"14:\t" "lduba [%2 + 5] %%asi, %%g1\n\t" \
        "sllx %%l2, 24, %%l2\n" \
"15:\t" "lduba [%2 + 6] %%asi, %%l1\n\t" \
        "sllx %%g1, 16, %%g1\n\t" \
        "or %%g7, %%l2, %%g7\n" \
"16:\t" "lduba [%2 + 7] %%asi, %%l2\n\t" \
        "sllx %%l1, 8, %%l1\n\t" \
        "or %%g7, %%g1, %%g7\n\t" \
        "or %%l1, %%l2, %%l1\n\t" \
        "or %%g7, %%l1, %%g7\n\t" \
        "cmp %1, 8\n\t" \
        "be,a,pt %%icc, 0f\n\t" \
        " stx %%g7, [%0]\n\t" \
        "srlx %%g7, 32, %%l1\n\t" \
        "sra %%g7, 0, %%g7\n\t" \
        "stx %%l1, [%0]\n\t" \
        "stx %%g7, [%0 + 8]\n" \
"0:\n\t" \
        "wr %%g0, %5, %%asi\n\n\t" \
        ".section __ex_table\n\t" \
        ".word 4b, " #errh "\n\t" \
        ".word 5b, " #errh "\n\t" \
        ".word 6b, " #errh "\n\t" \
        ".word 7b, " #errh "\n\t" \
        ".word 8b, " #errh "\n\t" \
        ".word 9b, " #errh "\n\t" \
        ".word 10b, " #errh "\n\t" \
        ".word 11b, " #errh "\n\t" \
        ".word 12b, " #errh "\n\t" \
        ".word 13b, " #errh "\n\t" \
        ".word 14b, " #errh "\n\t" \
        ".word 15b, " #errh "\n\t" \
        ".word 16b, " #errh "\n\n\t" \
        ".previous\n\t" \
        : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed), \
            "r" (asi), "i" (ASI_AIUS) \
        : "l1", "l2", "g7", "g1", "cc"); \
})

#define store_common(dst_addr, size, src_val, asi, errh) ({ \
__asm__ __volatile__ ( \
        "wr %3, 0, %%asi\n\t" \
        "ldx [%2], %%l1\n" \
        "cmp %1, 2\n\t" \
        "be,pn %%icc, 2f\n\t" \
        " cmp %1, 4\n\t" \
        "be,pt %%icc, 1f\n\t" \
        " srlx %%l1, 24, %%l2\n\t" \
        "srlx %%l1, 56, %%g1\n\t" \
        "srlx %%l1, 48, %%g7\n" \
"4:\t"  "stba %%g1, [%0] %%asi\n\t" \
        "srlx %%l1, 40, %%g1\n" \
"5:\t"  "stba %%g7, [%0 + 1] %%asi\n\t" \
        "srlx %%l1, 32, %%g7\n" \
"6:\t"  "stba %%g1, [%0 + 2] %%asi\n" \
"7:\t"  "stba %%g7, [%0 + 3] %%asi\n\t" \
        "srlx %%l1, 16, %%g1\n" \
"8:\t"  "stba %%l2, [%0 + 4] %%asi\n\t" \
        "srlx %%l1, 8, %%g7\n" \
"9:\t"  "stba %%g1, [%0 + 5] %%asi\n" \
"10:\t" "stba %%g7, [%0 + 6] %%asi\n\t" \
        "ba,pt %%xcc, 0f\n" \
"11:\t" " stba %%l1, [%0 + 7] %%asi\n" \
"1:\t"  "srl %%l1, 16, %%g7\n" \
"12:\t" "stba %%l2, [%0] %%asi\n\t" \
        "srl %%l1, 8, %%l2\n" \
"13:\t" "stba %%g7, [%0 + 1] %%asi\n" \
"14:\t" "stba %%l2, [%0 + 2] %%asi\n\t" \
        "ba,pt %%xcc, 0f\n" \
"15:\t" " stba %%l1, [%0 + 3] %%asi\n" \
"2:\t"  "srl %%l1, 8, %%l2\n" \
"16:\t" "stba %%l2, [%0] %%asi\n" \
"17:\t" "stba %%l1, [%0 + 1] %%asi\n" \
"0:\n\t" \
        "wr %%g0, %4, %%asi\n\n\t" \
        ".section __ex_table\n\t" \
        ".word 4b, " #errh "\n\t" \
        ".word 5b, " #errh "\n\t" \
        ".word 6b, " #errh "\n\t" \
        ".word 7b, " #errh "\n\t" \
        ".word 8b, " #errh "\n\t" \
        ".word 9b, " #errh "\n\t" \
        ".word 10b, " #errh "\n\t" \
        ".word 11b, " #errh "\n\t" \
        ".word 12b, " #errh "\n\t" \
        ".word 13b, " #errh "\n\t" \
        ".word 14b, " #errh "\n\t" \
        ".word 15b, " #errh "\n\t" \
        ".word 16b, " #errh "\n\t" \
        ".word 17b, " #errh "\n\n\t" \
        ".previous\n\t" \
        : : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS) \
        : "l1", "l2", "g7", "g1", "cc"); \
})

#define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({ \
        unsigned long zero = 0; \
        unsigned long *src_val = &zero; \
 \
        if (size == 16) { \
                size = 8; \
                zero = (((long)(reg_num ? \
                        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \
                        (unsigned)fetch_reg(reg_num + 1, regs); \
        } else if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \
        store_common(dst_addr, size, src_val, asi, errh); \
})

/* XXX Need to capture/release other cpu's for SMP around this. */
#define do_atomic(srcdest_reg, mem, errh) ({ \
        unsigned long flags, tmp; \
 \
        save_and_cli(flags); \
        tmp = *srcdest_reg; \
        do_integer_load(srcdest_reg, 4, mem, 0, ASI_P, errh); /* ASI_P assumed */ \
        store_common(mem, 4, &tmp, ASI_P, errh); /* ASI_P assumed */ \
        restore_flags(flags); \
})

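/* Retire the emulated instruction: the delayed-branch architecture
 * keeps both a trap PC and a next PC, so step tpc to tnpc and bump
 * tnpc past it. */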
static inline void advance(struct pt_regs *regs)
{
        regs->tpc = regs->tnpc;
        regs->tnpc += 4;
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
        return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
        return !floating_point_load_or_store_p(insn);
}

void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault");

void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
        unsigned long g2 = regs->u_regs [UREG_G2];
        unsigned long fixup = search_exception_table (regs->tpc, &g2);

        if (!fixup) {
                unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));
                if(address < PAGE_SIZE) {
                        printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
                } else
                        printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
                printk(KERN_ALERT " at virtual address %016lx\n", address);
                printk(KERN_ALERT "current->mm->context = %016lx\n",
                       (unsigned long) current->mm->context);
                printk(KERN_ALERT "current->mm->pgd = %016lx\n",
                       (unsigned long) current->mm->pgd);
                die_if_kernel("Oops", regs);
                /* Not reached */
        }
        regs->tpc = fixup;
        regs->tnpc = regs->tpc + 4;
        regs->u_regs [UREG_G2] = g2;
}

asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, unsigned long sfar, unsigned long sfsr)
{
        enum direction dir = decode_direction(insn);
        int size = decode_access_size(insn);

        if(!ok_for_kernel(insn) || dir == both) {
                printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n",
                       regs->tpc);
                unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs);

                __asm__ __volatile__ ("\n"
                        "kernel_unaligned_trap_fault:\n\t"
                        "mov %0, %%o0\n\t"
                        "call kernel_mna_trap_fault\n\t"
                        " mov %1, %%o1\n\t"
                        :
                        : "r" (regs), "r" (insn)
                        : "o0", "o1", "o2", "o3", "o4", "o5", "o7",
                          "g1", "g2", "g3", "g4", "g5", "g7", "cc");
        } else {
                unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f));

#ifdef DEBUG_MNA
                printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n",
                       regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
#endif
                switch(dir) {
                case load:
                        do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
                                        size, (unsigned long *) addr,
                                        decode_signedness(insn), decode_asi(insn, regs),
                                        kernel_unaligned_trap_fault);
                        break;

                case store:
                        do_integer_store(((insn>>25)&0x1f), size,
                                         (unsigned long *) addr, regs,
                                         decode_asi(insn, regs),
                                         kernel_unaligned_trap_fault);
                        break;
#if 0 /* unsupported */
                case both:
                        do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs),
                                  (unsigned long *) addr,
                                  kernel_unaligned_trap_fault);
                        break;
#endif
                default:
                        panic("Impossible kernel unaligned trap.");
                        /* Not reached... */
                }
                advance(regs);
        }
}

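/*
 * popc emulation (the instruction is not implemented in hardware on
 * early UltraSPARC): popc_helper holds the population count of every
 * 4-bit value, so summing the table entry for each of the 16 nibbles of
 * a 64-bit word gives its bit count.  E.g. popc(0xff) walks two 0xf
 * nibbles and returns 4 + 4 = 8.
 */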
static char popc_helper[] = {
        0, 1, 1, 2, 1, 2, 2, 3,
        1, 2, 2, 3, 2, 3, 3, 4,
};

int handle_popc(u32 insn, struct pt_regs *regs)
{
        u64 value;
        int ret, i, rd = ((insn >> 25) & 0x1f);
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

        if (insn & 0x2000) {
                maybe_flush_windows(0, 0, rd, from_kernel);
                value = sign_extend_imm13(insn);
        } else {
                maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
                value = fetch_reg(insn & 0x1f, regs);
        }
        for (ret = 0, i = 0; i < 16; i++) {
                ret += popc_helper[value & 0xf];
                value >>= 4;
        }
        if(rd < 16) {
                if (rd)
                        regs->u_regs[rd] = ret;
        } else {
                if (current->thread.flags & SPARC_FLAG_32BIT) {
                        struct reg_window32 *win32;
                        win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
                        put_user(ret, &win32->locals[rd - 16]);
                } else {
                        struct reg_window *win;
                        win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
                        put_user(ret, &win->locals[rd - 16]);
                }
        }
        advance(regs);
        return 1;
}

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void data_access_exception(struct pt_regs *regs);

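/*
 * Emulate the FPU loads/stores the hardware traps on: quad stores (STQ)
 * and LDF/LDDF/LDQF.  Per the v9 double-register numbering, rd's low
 * bit is promoted to bit 5 of the register index, so freg names an even
 * %f register from 0 to 62.  FPRS_DL/FPRS_DU say which half of the FP
 * register file currently holds valid saved state.
 */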
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
        unsigned long addr = compute_effective_address(regs, insn, 0);
        int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
        struct fpustate *f = FPUSTATE;
        int asi = decode_asi(insn, regs);
        int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

        save_and_clear_fpu();
        current->thread.xfsr[0] &= ~0x1c000;
        if (freg & 3) {
                current->thread.xfsr[0] |= (6 << 14) /* invalid_fp_register */;
                do_fpother(regs);
                return 0;
        }
        if (insn & 0x200000) {
                /* STQ */
                u64 first = 0, second = 0;

                if (current->thread.fpsaved[0] & flag) {
                        first = *(u64 *)&f->regs[freg];
                        second = *(u64 *)&f->regs[freg+2];
                }
                if (asi < 0x80) {
                        do_privact(regs);
                        return 1;
                }
                switch (asi) {
                case ASI_P:
                case ASI_S: break;
                case ASI_PL:
                case ASI_SL:
                        {
                                /* Need to convert endians */
                                u64 tmp = __swab64p(&first);

                                first = __swab64p(&second);
                                second = tmp;
                        }
                        break;
                default:
                        data_access_exception(regs);
                        return 1;
                }
                if (put_user (first >> 32, (u32 *)addr) ||
                    __put_user ((u32)first, (u32 *)(addr + 4)) ||
                    __put_user (second >> 32, (u32 *)(addr + 8)) ||
                    __put_user ((u32)second, (u32 *)(addr + 12))) {
                        data_access_exception(regs);
                        return 1;
                }
        } else {
                /* LDF, LDDF, LDQF */
                u32 data[4] __attribute__ ((aligned(8)));
                int size, i;
                int err;

                if (asi < 0x80) {
                        do_privact(regs);
                        return 1;
                } else if (asi > ASI_SNFL) {
                        data_access_exception(regs);
                        return 1;
                }
                switch (insn & 0x180000) {
                case 0x000000: size = 1; break;
                case 0x100000: size = 4; break;
                default: size = 2; break;
                }
                for (i = 0; i < size; i++)
                        data[i] = 0;

                err = get_user (data[0], (u32 *)addr);
                if (!err) {
                        for (i = 1; i < size; i++)
                                err |= __get_user (data[i], (u32 *)(addr + 4*i));
                }
                if (err && !(asi & 0x2 /* NF */)) {
                        data_access_exception(regs);
                        return 1;
                }
                if (asi & 0x8) /* Little */ {
                        u64 tmp;

                        switch (size) {
                        case 1: data[0] = le32_to_cpup(data + 0); break;
                        default: *(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
                                break;
                        case 4: tmp = le64_to_cpup((u64 *)(data + 0));
                                *(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
                                *(u64 *)(data + 2) = tmp;
                                break;
                        }
                }
                if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
                        current->thread.fpsaved[0] = FPRS_FEF;
                        current->thread.gsr[0] = 0;
                }
                if (!(current->thread.fpsaved[0] & flag)) {
                        if (freg < 32)
                                memset(f->regs, 0, 32*sizeof(u32));
                        else
                                memset(f->regs+32, 0, 32*sizeof(u32));
                }
                memcpy(f->regs + freg, data, size * 4);
                current->thread.fpsaved[0] |= flag;
        }
        advance(regs);
        return 1;
}

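/*
 * A non-faulting load (ASI 0x82 and friends) that faulted anyway: v9
 * semantics say the destination simply reads as zero, so clear rd (and
 * rd+1 for the ldd flavor) and move on.
 */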
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
        int rd = ((insn >> 25) & 0x1f);
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        unsigned long *reg;

        maybe_flush_windows(0, 0, rd, from_kernel);
        reg = fetch_reg_addr(rd, regs);
        if ((insn & 0x780000) == 0x180000)
                reg[1] = 0;
        reg[0] = 0;
        advance(regs);
}

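/*
 * LDDF on an address that is 4-byte but not 8-byte aligned traps with a
 * mem-address-not-aligned fault.  sfar holds the faulting address and
 * sfsr the fault status, with the access's ASI in bits 23:16; the
 * handler reads the two word halves, reassembles (byte-swapping for the
 * little-endian ASIs) the double, and deposits it in the FP state.
 */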
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
        unsigned long pc = regs->tpc;
        unsigned long tstate = regs->tstate;
        u32 insn;
        u32 first, second;
        u64 value;
        u8 asi, freg;
        int flag;
        struct fpustate *f = FPUSTATE;

        if(tstate & TSTATE_PRIV)
                die_if_kernel("lddfmna from kernel", regs);
        if(current->thread.flags & SPARC_FLAG_32BIT)
                pc = (u32)pc;
        if (get_user(insn, (u32 *)pc) != -EFAULT) {
                asi = sfsr >> 16;
                if ((asi > ASI_SNFL) ||
                    (asi < ASI_P))
                        goto daex;
                if (get_user(first, (u32 *)sfar) ||
                    get_user(second, (u32 *)(sfar + 4))) {
                        if (asi & 0x2) /* NF */ {
                                first = 0; second = 0;
                        } else
                                goto daex;
                }
                save_and_clear_fpu();
                freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
                value = (((u64)first) << 32) | second;
                if (asi & 0x8) /* Little */
                        value = __swab64p(&value);
                flag = (freg < 32) ? FPRS_DL : FPRS_DU;
                if (!(current->thread.fpsaved[0] & FPRS_FEF)) {
                        current->thread.fpsaved[0] = FPRS_FEF;
                        current->thread.gsr[0] = 0;
                }
                if (!(current->thread.fpsaved[0] & flag)) {
                        if (freg < 32)
                                memset(f->regs, 0, 32*sizeof(u32));
                        else
                                memset(f->regs+32, 0, 32*sizeof(u32));
                }
                *(u64 *)(f->regs + freg) = value;
                current->thread.fpsaved[0] |= flag;
        } else {
daex:           data_access_exception(regs);
                return;
        }
        advance(regs);
        return;
}

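/* The store-side twin of handle_lddfmna(): fetch the double from the FP
 * state (zero if that half of the file isn't live), byte-swap it for
 * the little-endian ASIs, and write it out as two 32-bit halves. */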
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
        unsigned long pc = regs->tpc;
        unsigned long tstate = regs->tstate;
        u32 insn;
        u64 value;
        u8 asi, freg;
        int flag;
        struct fpustate *f = FPUSTATE;

        if(tstate & TSTATE_PRIV)
                die_if_kernel("stdfmna from kernel", regs);
        if(current->thread.flags & SPARC_FLAG_32BIT)
                pc = (u32)pc;
        if (get_user(insn, (u32 *)pc) != -EFAULT) {
                freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
                asi = sfsr >> 16;
                value = 0;
                flag = (freg < 32) ? FPRS_DL : FPRS_DU;
                if ((asi > ASI_SNFL) ||
                    (asi < ASI_P))
                        goto daex;
                save_and_clear_fpu();
                if (current->thread.fpsaved[0] & flag)
                        value = *(u64 *)&f->regs[freg];
                switch (asi) {
                case ASI_P:
                case ASI_S: break;
                case ASI_PL:
                case ASI_SL:
                        value = __swab64p(&value); break;
                default: goto daex;
                }
                if (put_user (value >> 32, (u32 *)sfar) ||
                    __put_user ((u32)value, (u32 *)(sfar + 4)))
                        goto daex;
        } else {
daex:           data_access_exception(regs);
                return;
        }
        advance(regs);
        return;
}