/* align.c - handle alignment exceptions for the Power PC.
 *
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 * Copyright (c) 1998-1999 TiVo, Inc.
 *    PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *    PowerPC 403GCX/405GP modifications.
 * Copyright (c) 2001-2002 PPC64 team, IBM Corp
 *    64-bit and Power4 support
 * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp
 *                    <benh@kernel.crashing.org>
 *    Merge ppc32 and ppc64 implementations
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
#include <asm/disassemble.h>
struct aligninfo {
	unsigned char len;
	unsigned char flags;
};

#define INVALID	{ 0, 0 }
/* Bits in the flags field */
#define LD	0	/* load */
#define ST	1	/* store */
#define SE	2	/* sign-extend value, or FP ld/st as word */
#define F	4	/* to/from fp regs */
#define U	8	/* update index register */
#define M	0x10	/* multiple load/store */
#define SW	0x20	/* byte swap */
#define S	0x40	/* single-precision fp or... */
#define SX	0x40	/* ... byte count in XER */
#define HARD	0x80	/* string, stwcx. */
#define E4	0x40	/* SPE endianness is word */
#define E8	0x80	/* SPE endianness is double word */
#define SPLT	0x80	/* VSX SPLAT load */
/* DSISR bits reported for a DCBZ instruction: */
#define DCBZ	0x5f	/* 8xx/82xx dcbz faults when cache not enabled */
/*
 * The PowerPC stores certain bits of the instruction that caused the
 * alignment exception in the DSISR register.  This array maps those
 * bits to information about the operand length and what the
 * instruction would do.
 */
static struct aligninfo aligninfo[128] = {
	{ 4, LD },		/* 00 0 0000: lwz / lwarx */
	INVALID,		/* 00 0 0001 */
	{ 4, ST },		/* 00 0 0010: stw */
	INVALID,		/* 00 0 0011 */
	{ 2, LD },		/* 00 0 0100: lhz */
	{ 2, LD+SE },		/* 00 0 0101: lha */
	{ 2, ST },		/* 00 0 0110: sth */
	{ 4, LD+M },		/* 00 0 0111: lmw */
	{ 4, LD+F+S },		/* 00 0 1000: lfs */
	{ 8, LD+F },		/* 00 0 1001: lfd */
	{ 4, ST+F+S },		/* 00 0 1010: stfs */
	{ 8, ST+F },		/* 00 0 1011: stfd */
	{ 16, LD },		/* 00 0 1100: lq */
	{ 8, LD },		/* 00 0 1101: ld/ldu/lwa */
	INVALID,		/* 00 0 1110 */
	{ 8, ST },		/* 00 0 1111: std/stdu */
	{ 4, LD+U },		/* 00 1 0000: lwzu */
	INVALID,		/* 00 1 0001 */
	{ 4, ST+U },		/* 00 1 0010: stwu */
	INVALID,		/* 00 1 0011 */
	{ 2, LD+U },		/* 00 1 0100: lhzu */
	{ 2, LD+SE+U },		/* 00 1 0101: lhau */
	{ 2, ST+U },		/* 00 1 0110: sthu */
	{ 4, ST+M },		/* 00 1 0111: stmw */
	{ 4, LD+F+S+U },	/* 00 1 1000: lfsu */
	{ 8, LD+F+U },		/* 00 1 1001: lfdu */
	{ 4, ST+F+S+U },	/* 00 1 1010: stfsu */
	{ 8, ST+F+U },		/* 00 1 1011: stfdu */
	{ 16, LD+F },		/* 00 1 1100: lfdp */
	INVALID,		/* 00 1 1101 */
	{ 16, ST+F },		/* 00 1 1110: stfdp */
	INVALID,		/* 00 1 1111 */
	{ 8, LD },		/* 01 0 0000: ldx */
	INVALID,		/* 01 0 0001 */
	{ 8, ST },		/* 01 0 0010: stdx */
	INVALID,		/* 01 0 0011 */
	INVALID,		/* 01 0 0100 */
	{ 4, LD+SE },		/* 01 0 0101: lwax */
	INVALID,		/* 01 0 0110 */
	INVALID,		/* 01 0 0111 */
	{ 4, LD+M+HARD+SX },	/* 01 0 1000: lswx */
	{ 4, LD+M+HARD },	/* 01 0 1001: lswi */
	{ 4, ST+M+HARD+SX },	/* 01 0 1010: stswx */
	{ 4, ST+M+HARD },	/* 01 0 1011: stswi */
	INVALID,		/* 01 0 1100 */
	{ 8, LD+U },		/* 01 0 1101: ldu */
	INVALID,		/* 01 0 1110 */
	{ 8, ST+U },		/* 01 0 1111: stdu */
	{ 8, LD+U },		/* 01 1 0000: ldux */
	INVALID,		/* 01 1 0001 */
	{ 8, ST+U },		/* 01 1 0010: stdux */
	INVALID,		/* 01 1 0011 */
	INVALID,		/* 01 1 0100 */
	{ 4, LD+SE+U },		/* 01 1 0101: lwaux */
	INVALID,		/* 01 1 0110 */
	INVALID,		/* 01 1 0111 */
	INVALID,		/* 01 1 1000 */
	INVALID,		/* 01 1 1001 */
	INVALID,		/* 01 1 1010 */
	INVALID,		/* 01 1 1011 */
	INVALID,		/* 01 1 1100 */
	INVALID,		/* 01 1 1101 */
	INVALID,		/* 01 1 1110 */
	INVALID,		/* 01 1 1111 */
	INVALID,		/* 10 0 0000 */
	INVALID,		/* 10 0 0001 */
	INVALID,		/* 10 0 0010: stwcx. */
	INVALID,		/* 10 0 0011 */
	INVALID,		/* 10 0 0100 */
	INVALID,		/* 10 0 0101 */
	INVALID,		/* 10 0 0110 */
	INVALID,		/* 10 0 0111 */
	{ 4, LD+SW },		/* 10 0 1000: lwbrx */
	INVALID,		/* 10 0 1001 */
	{ 4, ST+SW },		/* 10 0 1010: stwbrx */
	INVALID,		/* 10 0 1011 */
	{ 2, LD+SW },		/* 10 0 1100: lhbrx */
	{ 4, LD+SE },		/* 10 0 1101: lwa */
	{ 2, ST+SW },		/* 10 0 1110: sthbrx */
	{ 16, ST },		/* 10 0 1111: stq */
	INVALID,		/* 10 1 0000 */
	INVALID,		/* 10 1 0001 */
	INVALID,		/* 10 1 0010 */
	INVALID,		/* 10 1 0011 */
	INVALID,		/* 10 1 0100 */
	INVALID,		/* 10 1 0101 */
	INVALID,		/* 10 1 0110 */
	INVALID,		/* 10 1 0111 */
	INVALID,		/* 10 1 1000 */
	INVALID,		/* 10 1 1001 */
	INVALID,		/* 10 1 1010 */
	INVALID,		/* 10 1 1011 */
	INVALID,		/* 10 1 1100 */
	INVALID,		/* 10 1 1101 */
	INVALID,		/* 10 1 1110 */
	{ 0, ST+HARD },		/* 10 1 1111: dcbz */
	{ 4, LD },		/* 11 0 0000: lwzx */
	INVALID,		/* 11 0 0001 */
	{ 4, ST },		/* 11 0 0010: stwx */
	INVALID,		/* 11 0 0011 */
	{ 2, LD },		/* 11 0 0100: lhzx */
	{ 2, LD+SE },		/* 11 0 0101: lhax */
	{ 2, ST },		/* 11 0 0110: sthx */
	INVALID,		/* 11 0 0111 */
	{ 4, LD+F+S },		/* 11 0 1000: lfsx */
	{ 8, LD+F },		/* 11 0 1001: lfdx */
	{ 4, ST+F+S },		/* 11 0 1010: stfsx */
	{ 8, ST+F },		/* 11 0 1011: stfdx */
	{ 16, LD+F },		/* 11 0 1100: lfdpx */
	{ 4, LD+F+SE },		/* 11 0 1101: lfiwax */
	{ 16, ST+F },		/* 11 0 1110: stfdpx */
	{ 4, ST+F },		/* 11 0 1111: stfiwx */
	{ 4, LD+U },		/* 11 1 0000: lwzux */
	INVALID,		/* 11 1 0001 */
	{ 4, ST+U },		/* 11 1 0010: stwux */
	INVALID,		/* 11 1 0011 */
	{ 2, LD+U },		/* 11 1 0100: lhzux */
	{ 2, LD+SE+U },		/* 11 1 0101: lhaux */
	{ 2, ST+U },		/* 11 1 0110: sthux */
	INVALID,		/* 11 1 0111 */
	{ 4, LD+F+S+U },	/* 11 1 1000: lfsux */
	{ 8, LD+F+U },		/* 11 1 1001: lfdux */
	{ 4, ST+F+S+U },	/* 11 1 1010: stfsux */
	{ 8, ST+F+U },		/* 11 1 1011: stfdux */
	INVALID,		/* 11 1 1100 */
	{ 4, LD+F },		/* 11 1 1101: lfiwzx */
	INVALID,		/* 11 1 1110 */
	INVALID,		/* 11 1 1111 */
};
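
/*
 * Illustrative sketch (ours, not part of the original file): how the
 * DSISR is turned into an index into aligninfo[].  fix_alignment()
 * below open-codes the same two shifts; the helper name is ours.
 */
static inline unsigned int __maybe_unused aligninfo_index(unsigned int dsisr)
{
	unsigned int index = (dsisr >> 10) & 0x7f;	/* low 7 index bits */

	index |= (dsisr >> 13) & 0x60;			/* two high bits */
	return index;	/* e.g. 0x05 selects { 2, LD+SE } above, i.e. lha */
}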
/*
 * The dcbz (data cache block zero) instruction
 * gives an alignment fault if used on non-cacheable
 * memory.  We handle the fault mainly for the
 * case when we are running with the cache disabled
 * for debugging.
 */
static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
{
	long __user *p;
	int i, size;

#ifdef __powerpc64__
	size = ppc64_caches.dline_size;
#else
	size = L1_CACHE_BYTES;
#endif
	p = (long __user *)(regs->dar & -size);
	if (user_mode(regs) && !access_ok(VERIFY_WRITE, p, size))
		return -EFAULT;
	for (i = 0; i < size / sizeof(long); ++i)
		if (__put_user_inatomic(0, p+i))
			return -EFAULT;
	return 1;
}
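
/*
 * Worked example (ours, not in the original source): the expression
 * "regs->dar & -size" above rounds the faulting address down to the
 * start of its cache block, because -size is the all-ones mask with
 * the low log2(size) bits clear.  With size = 128:
 *
 *	-128          == 0x...ffffff80
 *	0x2345 & -128 == 0x2300
 *
 * so the loop zeroes the whole 128-byte block containing the target.
 */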
/*
 * Emulate load & store multiple instructions
 * On 64-bit machines, these instructions only affect/use the
 * bottom 4 bytes of each register, and the loads clear the
 * top 4 bytes of the affected register.
 */
#ifdef __BIG_ENDIAN__
#ifdef CONFIG_PPC64
#define REG_BYTE(rp, i)		*((u8 *)((rp) + ((i) >> 2)) + ((i) & 3) + 4)
#else
#define REG_BYTE(rp, i)		*((u8 *)(rp) + (i))
#endif
#endif

#ifdef __LITTLE_ENDIAN__
#define REG_BYTE(rp, i) (*(((u8 *)((rp) + ((i)>>2)) + ((i)&3))))
#endif

#define SWIZ_PTR(p)		((unsigned char __user *)((p) ^ swiz))
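
/*
 * Illustrative note (ours, not in the original file): REG_BYTE()
 * linearises the low 32-bit halves of consecutive GPRs into a byte
 * stream.  On 64-bit big-endian, byte i lives in register i>>2 at
 * byte offset (i&3)+4, skipping the high word:
 *
 *	REG_BYTE(rp, 0) -> byte 4 of rp[0] (MSB of its low word)
 *	REG_BYTE(rp, 5) -> byte 5 of rp[1]
 *
 * which is why the loops below only ever touch the bottom 4 bytes
 * of each register, as the comment above says.
 */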
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
			    unsigned int reg, unsigned int nb,
			    unsigned int flags, unsigned int instr,
			    unsigned long swiz)
{
	unsigned long *rptr;
	unsigned int nb0, i, bswiz;
	unsigned long p;

	/*
	 * We do not try to emulate 8-byte multiples as they aren't really
	 * available in our operating environments and we don't try to
	 * emulate multiple operations in kernel land as they should never
	 * be used/generated there, at least not on unaligned boundaries.
	 */
	if (unlikely((nb > 4) || !user_mode(regs)))
		return 0;

	/* lmw, stmw, lswi/x, stswi/x */
	nb0 = 0;
	if (flags & HARD) {
		if (flags & SX) {
			nb = regs->xer & 127;
			if (nb == 0)
				return 1;
		} else {
			unsigned long pc = regs->nip ^ (swiz & 4);

			if (__get_user_inatomic(instr,
						(unsigned int __user *)pc))
				return -EFAULT;
			if (swiz == 0 && (flags & SW))
				instr = cpu_to_le32(instr);
			nb = (instr >> 11) & 0x1f;
			if (nb == 0)
				nb = 32;
		}
		if (nb + reg * 4 > 128) {
			nb0 = nb + reg * 4 - 128;
			nb = 128 - reg * 4;
		}
#ifdef __LITTLE_ENDIAN__
		/*
		 *  String instructions are endian neutral but the code
		 *  below is not.  Force byte swapping on so that the
		 *  effects of swizzling are undone in the load/store
		 *  loops below.
		 */
		flags ^= SW;
#endif
	} else {
		/* lmw, stmw */
		nb = (32 - reg) * 4;
	}

	if (!access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ), addr, nb+nb0))
		return -EFAULT;	/* bad address */

	rptr = &regs->gpr[reg];
	p = (unsigned long) addr;
	bswiz = (flags & SW)? 3: 0;

	if (!(flags & ST)) {
		/*
		 * This zeroes the top 4 bytes of the affected registers
		 * in 64-bit mode, and also zeroes out any remaining
		 * bytes of the last register for lsw*.
		 */
		memset(rptr, 0, ((nb + 3) / 4) * sizeof(unsigned long));
		if (nb0 > 0)
			memset(&regs->gpr[0], 0,
			       ((nb0 + 3) / 4) * sizeof(unsigned long));

		for (i = 0; i < nb; ++i, ++p)
			if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			rptr = &regs->gpr[0];
			addr += nb;
			for (i = 0; i < nb0; ++i, ++p)
				if (__get_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}
	} else {
		for (i = 0; i < nb; ++i, ++p)
			if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
						SWIZ_PTR(p)))
				return -EFAULT;
		if (nb0 > 0) {
			rptr = &regs->gpr[0];
			addr += nb;
			for (i = 0; i < nb0; ++i, ++p)
				if (__put_user_inatomic(REG_BYTE(rptr,
								 i ^ bswiz),
							SWIZ_PTR(p)))
					return -EFAULT;
		}
	}
	return 1;
}
/*
 * Emulate floating-point pair loads and stores.
 * Only POWER6 has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
			   unsigned int flags)
{
	char *ptr0 = (char *) &current->thread.TS_FPR(reg);
	char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
	int i, ret, sw = 0;

	if (reg & 1)
		return 0;	/* invalid form: FRS/FRT must be even */
	if (flags & SW)
		sw = 7;
	ret = 0;
	for (i = 0; i < 8; ++i) {
		if (!(flags & ST)) {
			ret |= __get_user(ptr0[i^sw], addr + i);
			ret |= __get_user(ptr1[i^sw], addr + i + 8);
		} else {
			ret |= __put_user(ptr0[i^sw], addr + i);
			ret |= __put_user(ptr1[i^sw], addr + i + 8);
		}
	}
	if (ret)
		return -EFAULT;
	return 1;	/* exception handled and fixed up */
}
#ifdef CONFIG_PPC64
static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
			  unsigned int reg, unsigned int flags)
{
	char *ptr0 = (char *)&regs->gpr[reg];
	char *ptr1 = (char *)&regs->gpr[reg+1];
	int i, ret, sw = 0;

	if (reg & 1)
		return 0;	/* invalid form: GPR must be even */
	if (flags & SW)
		sw = 7;
	ret = 0;
	for (i = 0; i < 8; ++i) {
		if (!(flags & ST)) {
			ret |= __get_user(ptr0[i^sw], addr + i);
			ret |= __get_user(ptr1[i^sw], addr + i + 8);
		} else {
			ret |= __put_user(ptr0[i^sw], addr + i);
			ret |= __put_user(ptr1[i^sw], addr + i + 8);
		}
	}
	if (ret)
		return -EFAULT;
	return 1;	/* exception handled and fixed up */
}
#endif /* CONFIG_PPC64 */
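
/*
 * Worked example (ours, not in the original file): in the two helpers
 * above, "i ^ sw" with sw = 7 mirrors the byte index within each
 * 8-byte half, byte-reversing the value as it is copied:
 *
 *	i:      0 1 2 3 4 5 6 7
 *	i ^ 7:  7 6 5 4 3 2 1 0
 *
 * With sw = 0 the copy is straight through; here SW typically reflects
 * a mismatch between the faulting context's MSR_LE and the kernel's.
 */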
#ifdef CONFIG_SPE

static struct aligninfo spe_aligninfo[32] = {
	{ 8, LD+E8 },		/* 0 00 00: evldd[x] */
	{ 8, LD+E4 },		/* 0 00 01: evldw[x] */
	{ 8, LD },		/* 0 00 10: evldh[x] */
	INVALID,		/* 0 00 11 */
	{ 2, LD },		/* 0 01 00: evlhhesplat[x] */
	INVALID,		/* 0 01 01 */
	{ 2, LD },		/* 0 01 10: evlhhousplat[x] */
	{ 2, LD+SE },		/* 0 01 11: evlhhossplat[x] */
	{ 4, LD },		/* 0 10 00: evlwhe[x] */
	INVALID,		/* 0 10 01 */
	{ 4, LD },		/* 0 10 10: evlwhou[x] */
	{ 4, LD+SE },		/* 0 10 11: evlwhos[x] */
	{ 4, LD+E4 },		/* 0 11 00: evlwwsplat[x] */
	INVALID,		/* 0 11 01 */
	{ 4, LD },		/* 0 11 10: evlwhsplat[x] */
	INVALID,		/* 0 11 11 */

	{ 8, ST+E8 },		/* 1 00 00: evstdd[x] */
	{ 8, ST+E4 },		/* 1 00 01: evstdw[x] */
	{ 8, ST },		/* 1 00 10: evstdh[x] */
	INVALID,		/* 1 00 11 */
	INVALID,		/* 1 01 00 */
	INVALID,		/* 1 01 01 */
	INVALID,		/* 1 01 10 */
	INVALID,		/* 1 01 11 */
	{ 4, ST },		/* 1 10 00: evstwhe[x] */
	INVALID,		/* 1 10 01 */
	{ 4, ST },		/* 1 10 10: evstwho[x] */
	INVALID,		/* 1 10 11 */
	{ 4, ST+E4 },		/* 1 11 00: evstwwe[x] */
	INVALID,		/* 1 11 01 */
	{ 4, ST+E4 },		/* 1 11 10: evstwwo[x] */
	INVALID,		/* 1 11 11 */
};
#define EVLDD		0x00
#define EVLDW		0x01
#define EVLDH		0x02
#define EVLHHESPLAT	0x04
#define EVLHHOUSPLAT	0x06
#define EVLHHOSSPLAT	0x07
#define EVLWHE		0x08
#define EVLWHOU		0x0A
#define EVLWHOS		0x0B
#define EVLWWSPLAT	0x0C
#define EVLWHSPLAT	0x0E
#define EVSTDD		0x10
#define EVSTDW		0x11
#define EVSTDH		0x12
#define EVSTWHE		0x18
#define EVSTWHO		0x1A
#define EVSTWWE		0x1C
#define EVSTWWO		0x1E
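
/*
 * Illustrative note (ours, not in the original file): these values are
 * simply bits 1-5 of the EVX extended opcode, which is how emulate_spe()
 * indexes spe_aligninfo[]:
 *
 *	index = (instr >> 1) & 0x1f;
 *
 * e.g. evlhhesplatx has extended opcode 776 (0b1100001000), and
 * (776 >> 1) & 0x1f == 4 == EVLHHESPLAT, selecting { 2, LD } above.
 */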
/*
 * Emulate SPE loads and stores.
 * Only Book-E has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
		       unsigned int instr)
{
	int ret;
	union {
		u64 ll;
		u32 w[2];
		u16 h[4];
		u8 v[8];
	} data, temp;
	unsigned char __user *p, *addr;
	unsigned long *evr = &current->thread.evr[reg];
	unsigned int nb, flags;

	instr = (instr >> 1) & 0x1f;

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

	nb = spe_aligninfo[instr].len;
	flags = spe_aligninfo[instr].flags;

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_spe_to_thread(current);

	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (flags & ST) {
		data.ll = 0;
		switch (instr) {
		case EVSTDD:
		case EVSTDW:
		case EVSTDH:
			data.w[0] = *evr;
			data.w[1] = regs->gpr[reg];
			break;
		case EVSTWHE:
			data.h[2] = *evr >> 16;
			data.h[3] = regs->gpr[reg] >> 16;
			break;
		case EVSTWHO:
			data.h[2] = *evr & 0xffff;
			data.h[3] = regs->gpr[reg] & 0xffff;
			break;
		case EVSTWWE:
			data.w[1] = *evr;
			break;
		case EVSTWWO:
			data.w[1] = regs->gpr[reg];
			break;
		default:
			return -EINVAL;
		}
	} else {
		temp.ll = data.ll = 0;
		ret = 0;
		p = addr;

		switch (nb) {
		case 8:
			ret |= __get_user_inatomic(temp.v[0], p++);
			ret |= __get_user_inatomic(temp.v[1], p++);
			ret |= __get_user_inatomic(temp.v[2], p++);
			ret |= __get_user_inatomic(temp.v[3], p++);
		case 4:
			ret |= __get_user_inatomic(temp.v[4], p++);
			ret |= __get_user_inatomic(temp.v[5], p++);
		case 2:
			ret |= __get_user_inatomic(temp.v[6], p++);
			ret |= __get_user_inatomic(temp.v[7], p++);
			if (unlikely(ret))
				return -EFAULT;
		}

		switch (instr) {
		case EVLDD:
		case EVLDW:
		case EVLDH:
			data.ll = temp.ll;
			break;
		case EVLHHESPLAT:
			data.h[0] = temp.h[3];
			data.h[2] = temp.h[3];
			break;
		case EVLHHOUSPLAT:
		case EVLHHOSSPLAT:
			data.h[1] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		case EVLWHE:
			data.h[0] = temp.h[2];
			data.h[2] = temp.h[3];
			break;
		case EVLWHOU:
		case EVLWHOS:
			data.h[1] = temp.h[2];
			data.h[3] = temp.h[3];
			break;
		case EVLWWSPLAT:
			data.w[0] = temp.w[1];
			data.w[1] = temp.w[1];
			break;
		case EVLWHSPLAT:
			data.h[0] = temp.h[2];
			data.h[1] = temp.h[2];
			data.h[2] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		default:
			return -EINVAL;
		}
	}

	if (flags & SW) {
		switch (flags & 0xf0) {
		case E8:
			data.ll = swab64(data.ll);
			break;
		case E4:
			data.w[0] = swab32(data.w[0]);
			data.w[1] = swab32(data.w[1]);
			break;
		/* It's half-word endian */
		default:
			data.h[0] = swab16(data.h[0]);
			data.h[1] = swab16(data.h[1]);
			data.h[2] = swab16(data.h[2]);
			data.h[3] = swab16(data.h[3]);
			break;
		}
	}

	if (flags & SE) {
		data.w[0] = (s16)data.h[1];
		data.w[1] = (s16)data.h[3];
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		ret = 0;
		p = addr;
		switch (nb) {
		case 8:
			ret |= __put_user_inatomic(data.v[0], p++);
			ret |= __put_user_inatomic(data.v[1], p++);
			ret |= __put_user_inatomic(data.v[2], p++);
			ret |= __put_user_inatomic(data.v[3], p++);
		case 4:
			ret |= __put_user_inatomic(data.v[4], p++);
			ret |= __put_user_inatomic(data.v[5], p++);
		case 2:
			ret |= __put_user_inatomic(data.v[6], p++);
			ret |= __put_user_inatomic(data.v[7], p++);
		}
		if (unlikely(ret))
			return -EFAULT;
	} else {
		*evr = data.w[0];
		regs->gpr[reg] = data.w[1];
	}

	return 1;
}
#endif /* CONFIG_SPE */
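
/*
 * Illustrative note (ours, not in the original file): the switch (nb)
 * ladders in emulate_spe() above deliberately fall through.  Bytes are
 * gathered from the high end of the temp union, so:
 *
 *	nb == 8: v[0] v[1] v[2] v[3] v[4] v[5] v[6] v[7]
 *	nb == 4:                     v[4] v[5] v[6] v[7]
 *	nb == 2:                               v[6] v[7]
 *
 * leaving the loaded bytes right-aligned in temp (e.g. a halfword
 * lands in temp.h[3] on big-endian Book-E).
 */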
#ifdef CONFIG_VSX
/*
 * Emulate VSX instructions...
 */
static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
		       unsigned int areg, struct pt_regs *regs,
		       unsigned int flags, unsigned int length,
		       unsigned int elsize)
{
	char *ptr;
	unsigned long *lptr;
	int ret = 0;
	int sw = 0;
	int i, j;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_vsx_to_thread(current);

	if (reg < 32)
		ptr = (char *) &current->thread.fp_state.fpr[reg][0];
	else
		ptr = (char *) &current->thread.vr_state.vr[reg - 32];

	lptr = (unsigned long *) ptr;

#ifdef __LITTLE_ENDIAN__
	if (flags & SW) {
		elsize = length;
		sw = length-1;
	} else {
		/*
		 * The elements are BE ordered, even in LE mode, so process
		 * them in reverse order.
		 */
		addr += length - elsize;

		/* 8 byte memory accesses go in the top 8 bytes of the VR */
		if (length == 8)
			ptr += 8;
	}
#else
	if (flags & SW)
		sw = elsize-1;
#endif

	for (j = 0; j < length; j += elsize) {
		for (i = 0; i < elsize; ++i) {
			if (flags & ST)
				ret |= __put_user(ptr[i^sw], addr + i);
			else
				ret |= __get_user(ptr[i^sw], addr + i);
		}
		ptr += elsize;
#ifdef __LITTLE_ENDIAN__
		addr -= elsize;
#else
		addr += elsize;
#endif
	}

#ifdef __BIG_ENDIAN__
#define VSX_HI 0
#define VSX_LO 1
#else
#define VSX_HI 1
#define VSX_LO 0
#endif

	if (ret)
		return -EFAULT;

	if (flags & U)
		regs->gpr[areg] = regs->dar;

	/* Splat load copies the same data to top and bottom 8 bytes */
	if (flags & SPLT)
		lptr[VSX_LO] = lptr[VSX_HI];
	/* For 8 byte loads, zero the low 8 bytes */
	else if (!(flags & ST) && (8 == length))
		lptr[VSX_LO] = 0;

	return 1;
}
#endif /* CONFIG_VSX */
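
/*
 * Worked example (ours, not in the original file): a 16-byte VSX access
 * with 4-byte elements on a little-endian kernel (SW clear) starts at
 * addr + length - elsize and walks backwards, so the BE-ordered
 * elements land in the right vector slots:
 *
 *	pass j=0:  memory addr+12 -> first 4 bytes of the VR state
 *	pass j=4:  memory addr+8  -> next 4 bytes
 *	pass j=8:  memory addr+4  -> next 4 bytes
 *	pass j=12: memory addr+0  -> last 4 bytes
 */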
/*
 * Called on alignment exception. Attempts to fixup
 *
 * Return 1 on success
 * Return 0 if unable to handle the interrupt
 * Return -EFAULT if data address is bad
 */

int fix_alignment(struct pt_regs *regs)
{
	unsigned int instr, nb, flags, instruction = 0;
	unsigned int reg, areg;
	unsigned int dsisr;
	unsigned char __user *addr;
	unsigned long p, swiz;
	int ret, i;
	union data {
		u64 ll;
		double dd;
		unsigned char v[8];
		struct {
#ifdef __LITTLE_ENDIAN__
			int	 low32;
			unsigned hi32;
#else
			unsigned hi32;
			int	 low32;
#endif
		} x32;
		struct {
#ifdef __LITTLE_ENDIAN__
			short	      low16;
			unsigned char hi48[6];
#else
			unsigned char hi48[6];
			short	      low16;
#endif
		} x16;
	} data;

	/*
	 * We require a complete register set, if not, then our assembly
	 * is broken
	 */
	CHECK_FULL_REGS(regs);

	dsisr = regs->dsisr;

	/* Some processors don't provide us with a DSISR we can use here,
	 * let's make one up from the instruction
	 */
	if (cpu_has_feature(CPU_FTR_NODSISRALIGN)) {
		unsigned long pc = regs->nip;

		if (cpu_has_feature(CPU_FTR_PPC_LE) && (regs->msr & MSR_LE))
			pc ^= 4;
		if (unlikely(__get_user_inatomic(instr,
						 (unsigned int __user *)pc)))
			return -EFAULT;
		if (cpu_has_feature(CPU_FTR_REAL_LE) && (regs->msr & MSR_LE))
			instr = cpu_to_le32(instr);
		dsisr = make_dsisr(instr);
		instruction = instr;
	}

	/* extract the operation and registers from the dsisr */
	reg = (dsisr >> 5) & 0x1f;	/* source/dest register */
	areg = dsisr & 0x1f;		/* register to update */

#ifdef CONFIG_SPE
	if ((instr >> 26) == 0x4) {
		PPC_WARN_ALIGNMENT(spe, regs);
		return emulate_spe(regs, reg, instr);
	}
#endif

	instr = (dsisr >> 10) & 0x7f;
	instr |= (dsisr >> 13) & 0x60;

	/* Lookup the operation in our table */
	nb = aligninfo[instr].len;
	flags = aligninfo[instr].flags;

	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
		nb = 8;
		flags = LD+SW;
	} else if (IS_XFORM(instruction) &&
		   ((instruction >> 1) & 0x3ff) == 660) {
		nb = 8;
		flags = ST+SW;
	}

	/* Byteswap little endian loads and stores */
	swiz = 0;
	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
		flags ^= SW;
#ifdef __BIG_ENDIAN__
		/*
		 * So-called "PowerPC little endian" mode works by
		 * swizzling addresses rather than by actually doing
		 * any byte-swapping.  To emulate this, we XOR each
		 * byte address with 7.  We also byte-swap, because
		 * the processor's address swizzling depends on the
		 * operand size (it xors the address with 7 for bytes,
		 * 6 for halfwords, 4 for words, 0 for doublewords) but
		 * we will xor with 7 and load/store each byte separately.
		 */
		if (cpu_has_feature(CPU_FTR_PPC_LE))
			swiz = 7;
#endif
	}

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

#ifdef CONFIG_VSX
	if ((instruction & 0xfc00003e) == 0x7c000018) {
		unsigned int elsize;

		/* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */
		reg |= (instruction & 0x1) << 5;
		/* Simple inline decoder instead of a table */
		/* VSX has only 8 and 16 byte memory accesses */
		nb = 8;
		if (instruction & 0x200)
			nb = 16;

		/* Vector stores in little-endian mode swap individual
		   elements, so process them separately */
		elsize = 4;
		if (instruction & 0x80)
			elsize = 8;

		flags = 0;
		if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE))
			flags |= SW;
		if (instruction & 0x100)
			flags |= ST;
		if (instruction & 0x040)
			flags |= U;
		/* splat load needs a special decoder */
		if ((instruction & 0x400) == 0) {
			flags |= SPLT;
			nb = 8;
		}
		PPC_WARN_ALIGNMENT(vsx, regs);
		return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize);
	}
#endif

	/* A size of 0 indicates an instruction we don't support, with
	 * the exception of DCBZ which is handled as a special case here
	 */
	if (instr == DCBZ) {
		PPC_WARN_ALIGNMENT(dcbz, regs);
		return emulate_dcbz(regs, addr);
	}
	if (unlikely(nb == 0))
		return 0;

	/* Load/Store Multiple instructions are handled in their own
	 * function
	 */
	if (flags & M) {
		PPC_WARN_ALIGNMENT(multiple, regs);
		return emulate_multiple(regs, addr, reg, nb,
					flags, instr, swiz);
	}

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok((flags & ST ? VERIFY_WRITE : VERIFY_READ),
				addr, nb)))
		return -EFAULT;

	/* Force the fprs into the save area so we can reference them */
	if (flags & F) {
		/* userland only */
		if (unlikely(!user_mode(regs)))
			return 0;
		flush_fp_to_thread(current);
	}

	if (nb == 16) {
		if (flags & F) {
			/* Special case for 16-byte FP loads and stores */
			PPC_WARN_ALIGNMENT(fp_pair, regs);
			return emulate_fp_pair(addr, reg, flags);
		} else {
#ifdef CONFIG_PPC64
			/* Special case for 16-byte loads and stores */
			PPC_WARN_ALIGNMENT(lq_stq, regs);
			return emulate_lq_stq(regs, addr, reg, flags);
#else
			return 0;
#endif
		}
	}

	PPC_WARN_ALIGNMENT(unaligned, regs);

	/* If we are loading, get the data from user space, else
	 * get it from register values
	 */
	if (!(flags & ST)) {
		unsigned int start = 0;

		switch (nb) {
		case 4:
			start = offsetof(union data, x32.low32);
			break;
		case 2:
			start = offsetof(union data, x16.low16);
			break;
		}

		data.ll = 0;
		ret = 0;
		p = (unsigned long)addr;

		for (i = 0; i < nb; i++)
			ret |= __get_user_inatomic(data.v[start + i],
						   SWIZ_PTR(p++));

		if (unlikely(ret))
			return -EFAULT;

	} else if (flags & F) {
		data.ll = current->thread.TS_FPR(reg);
		if (flags & S) {
			/* Single-precision FP store requires conversion... */
#ifdef CONFIG_PPC_FPU
			preempt_disable();
			enable_kernel_fp();
			cvt_df(&data.dd, (float *)&data.x32.low32);
			preempt_enable();
#else
			return 0;
#endif
		}
	} else
		data.ll = regs->gpr[reg];

	if (flags & SW) {
		switch (nb) {
		case 8:
			data.ll = swab64(data.ll);
			break;
		case 4:
			data.x32.low32 = swab32(data.x32.low32);
			break;
		case 2:
			data.x16.low16 = swab16(data.x16.low16);
			break;
		}
	}

	/* Perform other misc operations like sign extension
	 * or floating point single precision conversion
	 */
	switch (flags & ~(U|SW)) {
	case LD+SE:	/* sign extending integer loads */
	case LD+F+SE:	/* sign extend for lfiwax */
		if (nb == 2)
			data.ll = data.x16.low16;
		else	/* nb must be 4 */
			data.ll = data.x32.low32;
		break;

	/* Single-precision FP load requires conversion... */
	case LD+F+S:
#ifdef CONFIG_PPC_FPU
		preempt_disable();
		enable_kernel_fp();
		cvt_fd((float *)&data.x32.low32, &data.dd);
		preempt_enable();
#else
		return 0;
#endif
		break;
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		unsigned int start = 0;

		switch (nb) {
		case 4:
			start = offsetof(union data, x32.low32);
			break;
		case 2:
			start = offsetof(union data, x16.low16);
			break;
		}

		ret = 0;
		p = (unsigned long)addr;

		for (i = 0; i < nb; i++)
			ret |= __put_user_inatomic(data.v[start + i],
						   SWIZ_PTR(p++));

		if (unlikely(ret))
			return -EFAULT;
	} else if (flags & F)
		current->thread.TS_FPR(reg) = data.ll;
	else
		regs->gpr[reg] = data.ll;

	/* Update RA as needed */
	if (flags & U)
		regs->gpr[areg] = regs->dar;

	return 1;
}
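
/*
 * Usage sketch (ours, not part of this file): the alignment exception
 * handler is expected to act on fix_alignment()'s three return values
 * roughly like this; names other than fix_alignment() are illustrative.
 *
 *	void handle_alignment(struct pt_regs *regs)
 *	{
 *		int fixed = fix_alignment(regs);
 *
 *		if (fixed == 1) {
 *			regs->nip += 4;	   skip the emulated instruction
 *			return;
 *		}
 *		if (fixed == -EFAULT)
 *			...	   bad data address: deliver SIGSEGV
 *		else
 *			...	   unhandled: deliver SIGBUS
 *	}
 */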