// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/delay.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <asm/elf.h>
#include <asm/proc-fns.h>
#include <asm/fpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>

#if IS_ENABLED(CONFIG_LAZY_FPU)
struct task_struct *last_task_used_math;
#endif

extern void setup_mm_for_reboot(char mode);

extern inline void arch_reset(char mode)
{
        if (mode == 's') {
                /* Use cpu handler, jump to 0 */
                cpu_reset(0);
        }
}

void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);

static char reboot_mode_nds32 = 'h';

int __init reboot_setup(char *str)
{
        reboot_mode_nds32 = str[0];
        return 1;
}

static int cpub_pwroff(void)
{
        return 0;
}

__setup("reboot=", reboot_setup);

void machine_halt(void)
{
        cpub_pwroff();
}

EXPORT_SYMBOL(machine_halt);

void machine_power_off(void)
{
        if (pm_power_off)
                pm_power_off();
}

EXPORT_SYMBOL(machine_power_off);
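
/*
 * Restart the machine: flush and disable the caches, rebuild the 1:1
 * mappings needed for a soft boot, run the registered restart handlers
 * and finally hand control to the architecture-specific reset code.
 */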
void machine_restart(char *cmd)
{
        /*
         * Clean and disable cache, and turn off interrupts
         */
        cpu_proc_fin();

        /*
         * Tell the mm system that we are going to reboot -
         * we may need it to insert some 1:1 mappings so that
         * soft boot works.
         */
        setup_mm_for_reboot(reboot_mode_nds32);

        /* Execute kernel restart handler call chain */
        do_kernel_restart(cmd);

        /*
         * Now call the architecture specific reboot code.
         */
        arch_reset(reboot_mode_nds32);

        /*
         * Whoops - the architecture was unable to reboot.
         * Tell the user!
         */
        mdelay(1000);
        pr_info("Reboot failed -- System halted\n");
        while (1) ;
}

EXPORT_SYMBOL(machine_restart);
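
/*
 * Pretty-print the register state saved in a struct pt_regs for the
 * kernel's oops/debug paths.
 */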
void show_regs(struct pt_regs *regs)
{
        printk("PC is at %pS\n", (void *)instruction_pointer(regs));
        printk("LP is at %pS\n", (void *)regs->lp);
        pr_info("pc : [<%08lx>]    lp : [<%08lx>]    %s\n"
                "sp : %08lx  fp : %08lx  gp : %08lx\n",
                instruction_pointer(regs),
                regs->lp, print_tainted(), regs->sp, regs->fp, regs->gp);
        pr_info("r25: %08lx  r24: %08lx\n", regs->uregs[25], regs->uregs[24]);

        pr_info("r23: %08lx  r22: %08lx  r21: %08lx  r20: %08lx\n",
                regs->uregs[23], regs->uregs[22],
                regs->uregs[21], regs->uregs[20]);
        pr_info("r19: %08lx  r18: %08lx  r17: %08lx  r16: %08lx\n",
                regs->uregs[19], regs->uregs[18],
                regs->uregs[17], regs->uregs[16]);
        pr_info("r15: %08lx  r14: %08lx  r13: %08lx  r12: %08lx\n",
                regs->uregs[15], regs->uregs[14],
                regs->uregs[13], regs->uregs[12]);
        pr_info("r11: %08lx  r10: %08lx  r9 : %08lx  r8 : %08lx\n",
                regs->uregs[11], regs->uregs[10],
                regs->uregs[9], regs->uregs[8]);
        pr_info("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
                regs->uregs[7], regs->uregs[6], regs->uregs[5], regs->uregs[4]);
        pr_info("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
                regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]);
        pr_info("  IRQs o%s  Segment %s\n",
                interrupts_enabled(regs) ? "n" : "ff",
                segment_eq(get_fs(), KERNEL_DS) ? "kernel" : "user");
}

EXPORT_SYMBOL(show_regs);
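
/*
 * With lazy FPU switching enabled, last_task_used_math tracks the task
 * whose state currently lives in the FPU; clear it when that task exits
 * or flushes its thread state so stale contents are never restored.
 */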
void exit_thread(struct task_struct *tsk)
{
#if defined(CONFIG_FPU) && defined(CONFIG_LAZY_FPU)
        if (last_task_used_math == tsk)
                last_task_used_math = NULL;
#endif
}

void flush_thread(void)
{
#if defined(CONFIG_FPU)
        clear_fpu(task_pt_regs(current));
        clear_used_math();
# ifdef CONFIG_LAZY_FPU
        if (last_task_used_math == current)
                last_task_used_math = NULL;
# endif
#endif
}

DEFINE_PER_CPU(struct task_struct *, __entry_task);

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
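
/*
 * Set up the child's saved context: a kernel thread gets the function and
 * argument in r6/r7 and a zeroed pt_regs, a user thread gets a copy of the
 * parent's registers with r0 cleared so the child sees a zero return value.
 * Both resume execution at ret_from_fork.
 */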
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                unsigned long stk_sz, struct task_struct *p)
{
        struct pt_regs *childregs = task_pt_regs(p);

        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(childregs, 0, sizeof(struct pt_regs));
                /* kernel thread fn */
                p->thread.cpu_context.r6 = stack_start;
                /* kernel thread argument */
                p->thread.cpu_context.r7 = stk_sz;
        } else {
                *childregs = *current_pt_regs();
                if (stack_start)
                        childregs->sp = stack_start;
                /* child get zero as ret. */
                childregs->uregs[0] = 0;
                childregs->osp = 0;
                if (clone_flags & CLONE_SETTLS)
                        childregs->uregs[25] = childregs->uregs[3];
        }
        /* cpu context switching */
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;

#if IS_ENABLED(CONFIG_FPU)
        if (used_math()) {
# if !IS_ENABLED(CONFIG_LAZY_FPU)
                unlazy_fpu(current);
# else
                preempt_disable();
                if (last_task_used_math == current)
                        save_fpu(current);
                preempt_enable();
# endif
                p->thread.fpu = current->thread.fpu;
                clear_fpu(task_pt_regs(p));
                set_stopped_child_used_math(p);
        }
#endif

#if IS_ENABLED(CONFIG_HWZOL)
        childregs->lb = 0;
        childregs->le = 0;
        childregs->lc = 0;
#endif

        return 0;
}
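
/*
 * Context-switch helper for the FPU: without lazy FPU the outgoing task's
 * FPU state is saved right away; the incoming user task has its FPU enable
 * bit cleared so that its first FPU use faults and the correct state can
 * be loaded.
 */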
#if IS_ENABLED(CONFIG_FPU)
struct task_struct *_switch_fpu(struct task_struct *prev, struct task_struct *next)
{
#if !IS_ENABLED(CONFIG_LAZY_FPU)
        unlazy_fpu(prev);
#endif
        if (!(next->flags & PF_KTHREAD))
                clear_fpu(task_pt_regs(next));
        return prev;
}
#endif

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu)
{
        int fpvalid = 0;
#if IS_ENABLED(CONFIG_FPU)
        struct task_struct *tsk = current;

        fpvalid = tsk_used_math(tsk);
        if (fpvalid) {
                lose_fpu();
                memcpy(fpu, &tsk->thread.fpu, sizeof(*fpu));
        }
#endif
        return fpvalid;
}

EXPORT_SYMBOL(dump_fpu);
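
/*
 * Walk the sleeping task's frame-pointer chain, skipping scheduler
 * internals, to report the function the task is blocked in. The walk is
 * bounded to 16 frames and to addresses inside the task's stack.
 */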
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, lr;
        unsigned long stack_start, stack_end;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        if (IS_ENABLED(CONFIG_FRAME_POINTER)) {
                stack_start = (unsigned long)end_of_stack(p);
                stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;

                fp = thread_saved_fp(p);
                do {
                        if (fp < stack_start || fp > stack_end)
                                return 0;
                        lr = ((unsigned long *)fp)[0];
                        if (!in_sched_functions(lr))
                                return lr;
                        fp = *(unsigned long *)(fp + 4);
                } while (count++ < 16);
        }
        return 0;
}

EXPORT_SYMBOL(get_wchan);