linux/fpc-iii.git: arch/mips/kernel/process.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
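
/*
 * Prepare a freshly exec'd thread: drop kernel privileges and any live
 * FPU/MSA/DSP state, then point the saved EPC and stack pointer at the
 * new program's entry point and initial user stack.
 */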
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}
void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}
/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;
	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
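
/*
 * Frame information gathered by scanning a function's prologue; used by
 * the stack unwinder below. frame_size is the stack space claimed by the
 * prologue and pc_offset is the word offset at which $ra is saved, or -1
 * if the function is a leaf.
 */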
struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
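
/*
 * Recognise the prologue instruction that stores $ra to the stack. On a
 * match, *poff is set to the offset of the saved return address in
 * sizeof(ulong) units.
 */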
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->halfword[1])) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.simmediate;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;

		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	    ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}

	return 0;
#endif
}
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalr16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->halfword[1])) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}
static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->halfword[1])) {
		return (ip->mm16_r3_format.opcode == mm_pool16d_op &&
			ip->mm16_r3_format.simmediate && mm_addiusp_func) ||
		       (ip->mm16_r5_format.opcode == mm_pool16d_op &&
			ip->mm16_r5_format.rt == 29);
	}

	return ip->mm_i_format.opcode == mm_addiu32_op &&
	       ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
#endif
	return 0;
}
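
/*
 * Scan up to 128 instructions of a function's prologue for the stack
 * pointer adjustment (frame size) and the $ra save slot. Returns 0 for a
 * non-leaf (nested) function, 1 for a leaf function and -1 if the
 * prologue could not be analysed.
 */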
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	const unsigned int max_insns = 128;
	unsigned int i;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + info->func_size;

	for (i = 0; i < max_insns && ip < ip_end; i++, ip++) {
		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.halfword[0] = 0;
			insn.halfword[1] = ip->halfword[0];
		} else if (is_mmips) {
			insn.halfword[0] = ip->halfword[1];
			insn.halfword[1] = ip->halfword[0];
		} else {
			insn.word = ip->word;
		}

		if (is_jump_ins(&insn))
			break;

		if (!info->frame_size) {
			if (is_sp_move_ins(&insn)) {
#ifdef CONFIG_CPU_MICROMIPS
				if (mm_insn_16bit(ip->halfword[0])) {
					unsigned short tmp;

					if (ip->halfword[0] & mm_addiusp_func) {
						tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
						info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
					} else {
						tmp = (ip->halfword[0] >> 1);
						info->frame_size = -(signed short)(tmp & 0xf);
					}
					ip = (void *) &ip->halfword[1];
					ip--;
				} else
#endif
					info->frame_size = - ip->i_format.simmediate;
			}
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}
static struct mips_frame_info schedule_mfi __read_mostly;
#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif
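
/*
 * Analyse the prologue of __schedule() once at boot so that
 * thread_saved_pc() and get_wchan() can locate the return address saved
 * by a blocked task.
 */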
static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);
/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case. */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return the saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (!user_mode(regs) && __kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 * of the function.
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In such
		 * cases, avoid returning the same value forever.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);
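
/*
 * If the stack pointer currently lies on one of the per-CPU IRQ stacks,
 * unwind within that stack; otherwise unwind within the task's own stack
 * page.
 */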
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
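
/*
 * Report where a blocked task is sleeping: start from the PC saved at the
 * last context switch and, when kallsyms is available, unwind until the
 * first caller outside the scheduler.
 */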
/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}
static void arch_dump_stack(void *info)
{
	struct pt_regs *regs;

	regs = get_irq_regs();

	if (regs)
		show_regs(regs);

	dump_stack();
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	long this_cpu = get_cpu();

	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
		dump_stack();

	smp_call_function_many(mask, arch_dump_stack, NULL, 1);

	put_cpu();
}
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}
static void prepare_for_fp_mode_switch(void *info)
{
	struct mm_struct *mm = info;

	if (current->mm == mm)
		lose_fpu(1);
}
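
/*
 * Back end for prctl(PR_SET_FP_MODE): validate the requested FR/FRE mode
 * against the hardware, force every thread in the process to drop its
 * live FPU context while fp_mode_switching is set, then update the
 * per-thread TIF flags that describe the desired FP register model.
 */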
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	int max_users;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Proceed with the mode switch */
	preempt_disable();

	/* Save FP & vector context, then disable FPU & MSA */
	if (task->signal == current->signal)
		lose_fpu(1);

	/* Prevent any threads from obtaining live FP context */
	atomic_set(&task->mm->context.fp_mode_switching, 1);
	smp_mb__after_atomic();

	/*
	 * If there are multiple online CPUs then force any which are running
	 * threads in this process to lose their FPU context, which they can't
	 * regain until fp_mode_switching is cleared later.
	 */
	if (num_online_cpus() > 1) {
		/* No need to send an IPI for the local CPU */
		max_users = (task->mm == current->mm) ? 1 : 0;

		if (atomic_read(&current->mm->mm_users) > max_users)
			smp_call_function(prepare_for_fp_mode_switch,
					  (void *)current->mm, 1);
	}

	/*
	 * There are now no threads of the process with live FP context, so it
	 * is safe to proceed with the FP mode switch.
	 */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/* Allow threads to use FP again */
	atomic_set(&task->mm->context.fp_mode_switching, 0);
	preempt_enable();

	return 0;
}
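
/*
 * Fill a regset/core-dump buffer from a pt_regs. k0 and k1 are reported
 * as zero because their values are clobbered by the kernel's exception
 * handlers and are meaningless to userspace.
 */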
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */