perf tools: Don't clone maps from parent when synthesizing forks
[linux/fpc-iii.git] / arch / nds32 / mm / alignment.c
blobe1aed9dc692dd3bac752720a82880de865e7cb2d
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2005-2017 Andes Technology Corporation
4 #include <linux/proc_fs.h>
5 #include <linux/uaccess.h>
6 #include <linux/sysctl.h>
7 #include <asm/unaligned.h>
/*
 * Conditional trace helper: when @enable is non-zero, print a warning
 * via pr_warn(); when @tagged is also non-zero, prefix the message with
 * the calling function's name.  do { } while (0) makes the macro behave
 * as a single statement inside un-braced if/else.
 */
#define DEBUG(enable, tagged, ...)				\
	do {							\
		if (enable) {					\
			if (tagged)				\
				pr_warn("[ %30s() ] ", __func__); \
			pr_warn(__VA_ARGS__);			\
		}						\
	} while (0)
/* Register/immediate fields of a 32-bit instruction word. */
#define RT(inst)	(((inst) >> 20) & 0x1FUL)	/* target register */
#define RA(inst)	(((inst) >> 15) & 0x1FUL)	/* base register */
#define RB(inst)	(((inst) >> 10) & 0x1FUL)	/* index register */
#define SV(inst)	(((inst) >> 8) & 0x3UL)		/* scale (shift) amount */
#define IMM(inst)	(((inst) >> 0) & 0x7FFFUL)	/* 15-bit immediate */

/* Fields of a 16-bit instruction word. */
#define RA3(inst)	(((inst) >> 3) & 0x7UL)
#define RT3(inst)	(((inst) >> 6) & 0x7UL)
#define IMM3U(inst)	(((inst) >> 0) & 0x7UL)

#define RA5(inst)	(((inst) >> 0) & 0x1FUL)
#define RT4(inst)	(((inst) >> 5) & 0xFUL)

/*
 * Sign-extend a 15-bit immediate: bit 14 is the sign bit, so negative
 * values are biased down by 0x8000.  The argument is now fully
 * parenthesized; the previous definition expanded @imm_value unguarded,
 * so an argument containing a lower-precedence operator (e.g. a + b)
 * would be shifted/compared with the wrong precedence.
 */
#define GET_IMMSVAL(imm_value) \
	((((imm_value) >> 14) & 0x1) ? ((imm_value) - 0x8000) : (imm_value))
/*
 * Load one byte from user address @addr into @val and post-increment
 * @addr (lbi.bi = load byte, base update).  On a faulting access the
 * __ex_table entry redirects to label 3:, which sets @err to 1 and
 * resumes at 2:; the caller must test @err afterwards.
 */
#define __get8_data(val,addr,err)	\
	__asm__(			\
	"1: lbi.bi %1, [%2], #1\n"	\
	"2:\n"				\
	" .pushsection .text.fixup,\"ax\"\n"	\
	" .align 2\n"			\
	"3: movi %0, #1\n"		\
	" j 2b\n"			\
	" .popsection\n"		\
	" .pushsection __ex_table,\"a\"\n"	\
	" .align 3\n"			\
	" .long 1b, 3b\n"		\
	" .popsection\n"		\
	: "=r" (err), "=&r" (val), "=r" (addr)	\
	: "0" (err), "2" (addr))
/*
 * Read a 16-bit little-endian value byte-by-byte from user address
 * @addr into *@val_ptr.  If either byte access faults, err is set and
 * the macro jumps to the enclosing function's 'fault' label.
 */
#define get16_data(addr, val_ptr)		\
	do {					\
		unsigned int err = 0, v, a = addr; \
		__get8_data(v,a,err);		\
		*val_ptr = v << 0;		\
		__get8_data(v,a,err);		\
		*val_ptr |= v << 8;		\
		if (err)			\
			goto fault;		\
		*val_ptr = le16_to_cpu(*val_ptr); \
	} while(0)
/*
 * Read a 32-bit little-endian value byte-by-byte from user address
 * @addr into *@val_ptr.  If any byte access faults, err is set and the
 * macro jumps to the enclosing function's 'fault' label.
 */
#define get32_data(addr, val_ptr)		\
	do {					\
		unsigned int err = 0, v, a = addr; \
		__get8_data(v,a,err);		\
		*val_ptr = v << 0;		\
		__get8_data(v,a,err);		\
		*val_ptr |= v << 8;		\
		__get8_data(v,a,err);		\
		*val_ptr |= v << 16;		\
		__get8_data(v,a,err);		\
		*val_ptr |= v << 24;		\
		if (err)			\
			goto fault;		\
		*val_ptr = le32_to_cpu(*val_ptr); \
	} while(0)
/*
 * Load a 16- or 32-bit value from @addr into *@val_ptr (dispatching on
 * @len); jumps to the enclosing function's 'fault' label on a faulting
 * user access.  Wrapped in do { } while (0) so the macro expands to a
 * single statement: the previous bare if/else form was unsafe inside
 * un-braced if/else at the call site (dangling-else) and left a stray
 * empty statement after the caller's semicolon.
 */
#define get_data(addr, val_ptr, len)		\
	do {					\
		if ((len) == 2)			\
			get16_data(addr, val_ptr); \
		else				\
			get32_data(addr, val_ptr); \
	} while (0)
/*
 * Store the low 16 bits of @val to user address @addr one byte at a
 * time (little-endian, sbi.bi then sbi).  A faulting store is fixed up
 * via the __ex_table entries at label 4:, which sets err; the macro
 * then jumps to the enclosing function's 'fault' label.
 *
 * NOTE(review): uses le32_to_cpu (not le16_to_cpu) on a 16-bit value —
 * a no-op on little-endian but worth confirming the intent.
 */
#define set16_data(addr, val)			\
	do {					\
		unsigned int err = 0, *ptr = addr ; \
		val = le32_to_cpu(val);		\
		__asm__(			\
		"1: sbi.bi %2, [%1], #1\n"	\
		" srli %2, %2, #8\n"		\
		"2: sbi %2, [%1]\n"		\
		"3:\n"				\
		" .pushsection .text.fixup,\"ax\"\n"	\
		" .align 2\n"			\
		"4: movi %0, #1\n"		\
		" j 3b\n"			\
		" .popsection\n"		\
		" .pushsection __ex_table,\"a\"\n"	\
		" .align 3\n"			\
		" .long 1b, 4b\n"		\
		" .long 2b, 4b\n"		\
		" .popsection\n"		\
		: "=r" (err), "+r" (ptr), "+r" (val)	\
		: "0" (err)			\
		);				\
		if (err)			\
			goto fault;		\
	} while(0)
/*
 * Store all 32 bits of @val to user address @addr one byte at a time
 * (little-endian; three sbi.bi stores then a final sbi).  Any faulting
 * store is fixed up via the __ex_table entries at label 6:, which sets
 * err; the macro then jumps to the enclosing function's 'fault' label.
 */
#define set32_data(addr, val)			\
	do {					\
		unsigned int err = 0, *ptr = addr ; \
		val = le32_to_cpu(val);		\
		__asm__(			\
		"1: sbi.bi %2, [%1], #1\n"	\
		" srli %2, %2, #8\n"		\
		"2: sbi.bi %2, [%1], #1\n"	\
		" srli %2, %2, #8\n"		\
		"3: sbi.bi %2, [%1], #1\n"	\
		" srli %2, %2, #8\n"		\
		"4: sbi %2, [%1]\n"		\
		"5:\n"				\
		" .pushsection .text.fixup,\"ax\"\n"	\
		" .align 2\n"			\
		"6: movi %0, #1\n"		\
		" j 5b\n"			\
		" .popsection\n"		\
		" .pushsection __ex_table,\"a\"\n"	\
		" .align 3\n"			\
		" .long 1b, 6b\n"		\
		" .long 2b, 6b\n"		\
		" .long 3b, 6b\n"		\
		" .long 4b, 6b\n"		\
		" .popsection\n"		\
		: "=r" (err), "+r" (ptr), "+r" (val)	\
		: "0" (err)			\
		);				\
		if (err)			\
			goto fault;		\
	} while(0)
/*
 * Store a 16- or 32-bit value @val to @addr (dispatching on @len);
 * jumps to the enclosing function's 'fault' label on a faulting user
 * access.  Wrapped in do { } while (0) so the macro expands to a
 * single statement: the previous bare if/else form was unsafe inside
 * un-braced if/else at the call site (dangling-else) and left a stray
 * empty statement after the caller's semicolon.
 */
#define set_data(addr, val, len)		\
	do {					\
		if ((len) == 2)			\
			set16_data(addr, val);	\
		else				\
			set32_data(addr, val);	\
	} while (0)
/*
 * A fetched 32-bit word with the top bit set holds a 16-bit instruction
 * in its upper half (see do_unaligned_access()).
 */
#define NDS32_16BIT_INSTRUCTION	0x80000000

/* Page-presence / access probes provided elsewhere in arch mm code. */
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
extern pte_t va_kernel_present(unsigned long addr);
extern int va_readable(struct pt_regs *regs, unsigned long addr);
extern int va_writable(struct pt_regs *regs, unsigned long addr);

/*
 * Runtime knobs, exported via sysctl (see nds32_sysctl_init()):
 * unalign_access_mode enables emulation, unalign_access_debug enables
 * the DEBUG() trace output in do_unaligned_access().
 */
int unalign_access_mode = 0, unalign_access_debug = 0;
155 static inline unsigned long *idx_to_addr(struct pt_regs *regs, int idx)
157 /* this should be consistent with ptrace.h */
158 if (idx >= 0 && idx <= 25) /* R0-R25 */
159 return &regs->uregs[0] + idx;
160 else if (idx >= 28 && idx <= 30) /* FP, GP, LP */
161 return &regs->fp + (idx - 28);
162 else if (idx == 31) /* SP */
163 return &regs->sp;
164 else
165 return NULL; /* cause a segfault */
/*
 * Fetch the 32-bit instruction word at @addr (the faulting PC) with an
 * unaligned-safe load, converting from the big-endian storage order of
 * instruction words to CPU order.
 */
static inline unsigned long get_inst(unsigned long addr)
{
	return be32_to_cpu(get_unaligned((u32 *) addr));
}
/*
 * Sign-extend the low @len bytes of @val to a full 32-bit value.
 *
 * Operates on the little-endian byte image of @val: copy the @len
 * low-order bytes into ret, then, if the most significant copied byte
 * has its top bit set, fill the remaining bytes with 0xff.  The
 * cpu_to_le32/le32_to_cpu conversions keep the byte walk correct
 * regardless of host endianness.
 */
static inline unsigned long sign_extend(unsigned long val, int len)
{
	unsigned long ret = 0;
	unsigned char *s, *t;
	int i = 0;

	val = cpu_to_le32(val);

	/* s walks the source bytes, t the destination bytes */
	s = (void *)&val;
	t = (void *)&ret;

	while (i++ < len)
		*t++ = *s++;

	/* *(t - 1) is the sign byte; i is already len + 1 here */
	if (((*(t - 1)) & 0x80) && (i < 4)) {

		while (i++ <= 4)
			*t++ = 0xff;
	}
	return le32_to_cpu(ret);
}
/*
 * Emulate one 16-bit load/store instruction (@inst holds the half-word)
 * that raised an alignment fault, performing the access byte-by-byte.
 *
 * Per-opcode decode flags:
 *   imm       - offset comes from the IMM3U field, scaled by len
 *   regular   - pre-indexed form; !regular is the .bi (post-update) form
 *   load      - 1 = load, 0 = store
 *   len       - access width in bytes (2 or 4)
 *   addr_mode - base register field: 3 -> RA3, 5 -> RA5
 *   idx_mode  - data register field: 3 -> RT3, 4 -> RT4
 *
 * Returns 0 on success (and advances regs->ipc by 2), -EFAULT for an
 * opcode we do not emulate, -EACCES if the address is inaccessible.
 */
static inline int do_16(unsigned long inst, struct pt_regs *regs)
{
	int imm, regular, load, len, addr_mode, idx_mode;
	unsigned long unaligned_addr, target_val, source_idx, target_idx,
	    shift = 0;

	/* bits [14:9] select the opcode */
	switch ((inst >> 9) & 0x3F) {

	case 0x12:		/* LHI333 */
		imm = 1;
		regular = 1;
		load = 1;
		len = 2;
		addr_mode = 3;
		idx_mode = 3;
		break;
	case 0x10:		/* LWI333 */
		imm = 1;
		regular = 1;
		load = 1;
		len = 4;
		addr_mode = 3;
		idx_mode = 3;
		break;
	case 0x11:		/* LWI333.bi */
		imm = 1;
		regular = 0;
		load = 1;
		len = 4;
		addr_mode = 3;
		idx_mode = 3;
		break;
	case 0x1A:		/* LWI450 */
		imm = 0;
		regular = 1;
		load = 1;
		len = 4;
		addr_mode = 5;
		idx_mode = 4;
		break;
	case 0x16:		/* SHI333 */
		imm = 1;
		regular = 1;
		load = 0;
		len = 2;
		addr_mode = 3;
		idx_mode = 3;
		break;
	case 0x14:		/* SWI333 */
		imm = 1;
		regular = 1;
		load = 0;
		len = 4;
		addr_mode = 3;
		idx_mode = 3;
		break;
	case 0x15:		/* SWI333.bi */
		imm = 1;
		regular = 0;
		load = 0;
		len = 4;
		addr_mode = 3;
		idx_mode = 3;
		break;
	case 0x1B:		/* SWI450 */
		imm = 0;
		regular = 1;
		load = 0;
		len = 4;
		addr_mode = 5;
		idx_mode = 4;
		break;

	default:
		return -EFAULT;
	}

	/* read the base register; remember its index for .bi write-back */
	if (addr_mode == 3) {
		unaligned_addr = *idx_to_addr(regs, RA3(inst));
		source_idx = RA3(inst);
	} else {
		unaligned_addr = *idx_to_addr(regs, RA5(inst));
		source_idx = RA5(inst);
	}

	if (idx_mode == 3)
		target_idx = RT3(inst);
	else
		target_idx = RT4(inst);

	if (imm)
		shift = IMM3U(inst) * len;

	if (regular)
		unaligned_addr += shift;

	if (load) {
		if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len))
			return -EACCES;

		/* get_data jumps to 'fault' below on a faulting access */
		get_data(unaligned_addr, &target_val, len);
		*idx_to_addr(regs, target_idx) = target_val;
	} else {
		if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len))
			return -EACCES;
		target_val = *idx_to_addr(regs, target_idx);
		set_data((void *)unaligned_addr, target_val, len);
	}

	/* .bi forms write the updated address back to the base register */
	if (!regular)
		*idx_to_addr(regs, source_idx) = unaligned_addr + shift;
	regs->ipc += 2;	/* step past the 16-bit instruction */

	return 0;
fault:
	return -EACCES;
}
/*
 * Emulate one 32-bit load/store instruction that raised an alignment
 * fault, performing the access byte-by-byte.
 *
 * Per-opcode decode flags:
 *   imm      - 1: offset is GET_IMMSVAL(IMM) * len,
 *              0: offset is RB register value << SV scale
 *   regular  - pre-indexed form; !regular is the .bi (post-update) form
 *   load     - 1 = load, 0 = store
 *   len      - access width in bytes (2 or 4)
 *   sign_ext - sign-extend the loaded half-word (LHS forms)
 *
 * Returns 0 on success (and advances regs->ipc by 4), -EFAULT for an
 * opcode we do not emulate, -EACCES if the address is inaccessible.
 */
static inline int do_32(unsigned long inst, struct pt_regs *regs)
{
	int imm, regular, load, len, sign_ext;
	unsigned long unaligned_addr, target_val, shift;

	unaligned_addr = *idx_to_addr(regs, RA(inst));

	/* immediate-offset forms are selected by the top 7 opcode bits */
	switch ((inst >> 25) << 1) {

	case 0x02:		/* LHI */
		imm = 1;
		regular = 1;
		load = 1;
		len = 2;
		sign_ext = 0;
		break;
	case 0x0A:		/* LHI.bi */
		imm = 1;
		regular = 0;
		load = 1;
		len = 2;
		sign_ext = 0;
		break;
	case 0x22:		/* LHSI */
		imm = 1;
		regular = 1;
		load = 1;
		len = 2;
		sign_ext = 1;
		break;
	case 0x2A:		/* LHSI.bi */
		imm = 1;
		regular = 0;
		load = 1;
		len = 2;
		sign_ext = 1;
		break;
	case 0x04:		/* LWI */
		imm = 1;
		regular = 1;
		load = 1;
		len = 4;
		sign_ext = 0;
		break;
	case 0x0C:		/* LWI.bi */
		imm = 1;
		regular = 0;
		load = 1;
		len = 4;
		sign_ext = 0;
		break;
	case 0x12:		/* SHI */
		imm = 1;
		regular = 1;
		load = 0;
		len = 2;
		sign_ext = 0;
		break;
	case 0x1A:		/* SHI.bi */
		imm = 1;
		regular = 0;
		load = 0;
		len = 2;
		sign_ext = 0;
		break;
	case 0x14:		/* SWI */
		imm = 1;
		regular = 1;
		load = 0;
		len = 4;
		sign_ext = 0;
		break;
	case 0x1C:		/* SWI.bi */
		imm = 1;
		regular = 0;
		load = 0;
		len = 4;
		sign_ext = 0;
		break;

	default:
		/* register-offset forms are selected by the low byte */
		switch (inst & 0xff) {

		case 0x01:	/* LH */
			imm = 0;
			regular = 1;
			load = 1;
			len = 2;
			sign_ext = 0;
			break;
		case 0x05:	/* LH.bi */
			imm = 0;
			regular = 0;
			load = 1;
			len = 2;
			sign_ext = 0;
			break;
		case 0x11:	/* LHS */
			imm = 0;
			regular = 1;
			load = 1;
			len = 2;
			sign_ext = 1;
			break;
		case 0x15:	/* LHS.bi */
			imm = 0;
			regular = 0;
			load = 1;
			len = 2;
			sign_ext = 1;
			break;
		case 0x02:	/* LW */
			imm = 0;
			regular = 1;
			load = 1;
			len = 4;
			sign_ext = 0;
			break;
		case 0x06:	/* LW.bi */
			imm = 0;
			regular = 0;
			load = 1;
			len = 4;
			sign_ext = 0;
			break;
		case 0x09:	/* SH */
			imm = 0;
			regular = 1;
			load = 0;
			len = 2;
			sign_ext = 0;
			break;
		case 0x0D:	/* SH.bi */
			imm = 0;
			regular = 0;
			load = 0;
			len = 2;
			sign_ext = 0;
			break;
		case 0x0A:	/* SW */
			imm = 0;
			regular = 1;
			load = 0;
			len = 4;
			sign_ext = 0;
			break;
		case 0x0E:	/* SW.bi */
			imm = 0;
			regular = 0;
			load = 0;
			len = 4;
			sign_ext = 0;
			break;

		default:
			return -EFAULT;
		}
	}

	if (imm)
		shift = GET_IMMSVAL(IMM(inst)) * len;
	else
		shift = *idx_to_addr(regs, RB(inst)) << SV(inst);

	if (regular)
		unaligned_addr += shift;

	if (load) {

		if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len))
			return -EACCES;

		/* get_data jumps to 'fault' below on a faulting access */
		get_data(unaligned_addr, &target_val, len);

		if (sign_ext)
			*idx_to_addr(regs, RT(inst)) =
			    sign_extend(target_val, len);
		else
			*idx_to_addr(regs, RT(inst)) = target_val;
	} else {

		if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len))
			return -EACCES;

		target_val = *idx_to_addr(regs, RT(inst));
		set_data((void *)unaligned_addr, target_val, len);
	}

	/* .bi forms write the updated address back to the base register */
	if (!regular)
		*idx_to_addr(regs, RA(inst)) = unaligned_addr + shift;

	regs->ipc += 4;	/* step past the 32-bit instruction */

	return 0;
fault:
	return -EACCES;
}
511 int do_unaligned_access(unsigned long addr, struct pt_regs *regs)
513 unsigned long inst;
514 int ret = -EFAULT;
515 mm_segment_t seg = get_fs();
517 inst = get_inst(regs->ipc);
519 DEBUG((unalign_access_debug > 0), 1,
520 "Faulting addr: 0x%08lx, pc: 0x%08lx [inst: 0x%08lx ]\n", addr,
521 regs->ipc, inst);
523 set_fs(USER_DS);
525 if (inst & NDS32_16BIT_INSTRUCTION)
526 ret = do_16((inst >> 16) & 0xffff, regs);
527 else
528 ret = do_32(inst, regs);
529 set_fs(seg);
531 return ret;
#ifdef CONFIG_PROC_FS

/*
 * sysctl interface: nds32/unaligned_access/{enable,debug_info} toggle
 * the unalign_access_mode and unalign_access_debug globals above.
 */
static struct ctl_table alignment_tbl[3] = {
	{
	 .procname = "enable",
	 .data = &unalign_access_mode,
	 .maxlen = sizeof(unalign_access_mode),
	 /* NOTE(review): 0666 is world-writable — confirm this is intended */
	 .mode = 0666,
	 .proc_handler = &proc_dointvec
	},
	{
	 .procname = "debug_info",
	 .data = &unalign_access_debug,
	 .maxlen = sizeof(unalign_access_debug),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec
	},
	{}			/* sentinel */
};

static struct ctl_table nds32_sysctl_table[2] = {
	{
	 .procname = "unaligned_access",
	 .mode = 0555,
	 .child = alignment_tbl},
	{}			/* sentinel */
};

static struct ctl_path nds32_path[2] = {
	{.procname = "nds32"},
	{}			/* sentinel */
};

/*
 * Initialize nds32 alignment-correction interface
 */
static int __init nds32_sysctl_init(void)
{
	register_sysctl_paths(nds32_path, nds32_sysctl_table);
	return 0;
}

__initcall(nds32_sysctl_init);
#endif /* CONFIG_PROC_FS */