Linux 2.6.28-rc5
arch/sh/kernel/process_64.c

/*
 * arch/sh/kernel/process_64.c
 *
 * This file handles the architecture-dependent parts of process handling..
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003 - 2007 Paul Mundt
 * Copyright (C) 2003, 2004 Richard Curnow
 *
 * Started from SH3/4 version:
 *   Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 *
 *   In turn started from i386 version:
 *     Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/io.h>
#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>

struct task_struct *last_task_used_math = NULL;

static int hlt_counter = 1;

#define HARD_IDLE_TIMEOUT (HZ / 3)

static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}

static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}

__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);

static inline void hlt(void)
{
	__asm__ __volatile__ ("sleep" : : : "memory");
}

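/*
 * Note on the naming above: hlt_counter acts as a "disable sleep" flag
 * rather than a counter here.  With the default of 1 (and with the
 * "nohlt" boot option) the idle loop below busy-waits with cpu_relax();
 * only the "hlt" boot option sets it to 0 and lets the idle loop issue
 * the "sleep" instruction via hlt().
 */
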
/*
 * The idle loop on a uniprocessor SH..
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		if (hlt_counter) {
			while (!need_resched())
				cpu_relax();
		} else {
			local_irq_disable();
			while (!need_resched()) {
				local_irq_enable();
				hlt();
				local_irq_disable();
			}
			local_irq_enable();
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void machine_restart(char * __unused)
{
	extern void phys_stext(void);

	phys_stext();
}

void machine_halt(void)
{
	for (;;);
}

void machine_power_off(void)
{
#if 0
	/* Disable watchdog timer */
	ctrl_outl(0xa5000000, WTCSR);
	/* Configure deep standby on sleep */
	ctrl_outl(0x03, STBCR);
#endif

	__asm__ __volatile__ (
		"sleep\n\t"
		"synci\n\t"
		"nop;nop;nop;nop\n\t"
	);

	panic("Unexpected wakeup!\n");
}

void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);

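/*
 * The #if 0 block in machine_power_off() sketches how a real power-off
 * would be set up (disable the watchdog via WTCSR, select deep standby
 * in STBCR before sleeping, per the comments in the disabled code); as
 * compiled, the function simply executes the sleep/synci/nop sequence
 * and panics if the CPU ever resumes.
 */
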
void show_regs(struct pt_regs * regs)
{
	unsigned long long ah, al, bh, bl, ch, cl;

	printk("\n");

	ah = (regs->pc) >> 32;
	al = (regs->pc) & 0xffffffff;
	bh = (regs->regs[18]) >> 32;
	bl = (regs->regs[18]) & 0xffffffff;
	ch = (regs->regs[15]) >> 32;
	cl = (regs->regs[15]) & 0xffffffff;
	printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->sr) >> 32;
	al = (regs->sr) & 0xffffffff;
	asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
	asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
	bh = (bh) >> 32;
	bl = (bl) & 0xffffffff;
	asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
	asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
	ch = (ch) >> 32;
	cl = (cl) & 0xffffffff;
	printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[0]) >> 32;
	al = (regs->regs[0]) & 0xffffffff;
	bh = (regs->regs[1]) >> 32;
	bl = (regs->regs[1]) & 0xffffffff;
	ch = (regs->regs[2]) >> 32;
	cl = (regs->regs[2]) & 0xffffffff;
	printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[3]) >> 32;
	al = (regs->regs[3]) & 0xffffffff;
	bh = (regs->regs[4]) >> 32;
	bl = (regs->regs[4]) & 0xffffffff;
	ch = (regs->regs[5]) >> 32;
	cl = (regs->regs[5]) & 0xffffffff;
	printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[6]) >> 32;
	al = (regs->regs[6]) & 0xffffffff;
	bh = (regs->regs[7]) >> 32;
	bl = (regs->regs[7]) & 0xffffffff;
	ch = (regs->regs[8]) >> 32;
	cl = (regs->regs[8]) & 0xffffffff;
	printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[9]) >> 32;
	al = (regs->regs[9]) & 0xffffffff;
	bh = (regs->regs[10]) >> 32;
	bl = (regs->regs[10]) & 0xffffffff;
	ch = (regs->regs[11]) >> 32;
	cl = (regs->regs[11]) & 0xffffffff;
	printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[12]) >> 32;
	al = (regs->regs[12]) & 0xffffffff;
	bh = (regs->regs[13]) >> 32;
	bl = (regs->regs[13]) & 0xffffffff;
	ch = (regs->regs[14]) >> 32;
	cl = (regs->regs[14]) & 0xffffffff;
	printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[16]) >> 32;
	al = (regs->regs[16]) & 0xffffffff;
	bh = (regs->regs[17]) >> 32;
	bl = (regs->regs[17]) & 0xffffffff;
	ch = (regs->regs[19]) >> 32;
	cl = (regs->regs[19]) & 0xffffffff;
	printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[20]) >> 32;
	al = (regs->regs[20]) & 0xffffffff;
	bh = (regs->regs[21]) >> 32;
	bl = (regs->regs[21]) & 0xffffffff;
	ch = (regs->regs[22]) >> 32;
	cl = (regs->regs[22]) & 0xffffffff;
	printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[23]) >> 32;
	al = (regs->regs[23]) & 0xffffffff;
	bh = (regs->regs[24]) >> 32;
	bl = (regs->regs[24]) & 0xffffffff;
	ch = (regs->regs[25]) >> 32;
	cl = (regs->regs[25]) & 0xffffffff;
	printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[26]) >> 32;
	al = (regs->regs[26]) & 0xffffffff;
	bh = (regs->regs[27]) >> 32;
	bl = (regs->regs[27]) & 0xffffffff;
	ch = (regs->regs[28]) >> 32;
	cl = (regs->regs[28]) & 0xffffffff;
	printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[29]) >> 32;
	al = (regs->regs[29]) & 0xffffffff;
	bh = (regs->regs[30]) >> 32;
	bl = (regs->regs[30]) & 0xffffffff;
	ch = (regs->regs[31]) >> 32;
	cl = (regs->regs[31]) & 0xffffffff;
	printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[32]) >> 32;
	al = (regs->regs[32]) & 0xffffffff;
	bh = (regs->regs[33]) >> 32;
	bl = (regs->regs[33]) & 0xffffffff;
	ch = (regs->regs[34]) >> 32;
	cl = (regs->regs[34]) & 0xffffffff;
	printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[35]) >> 32;
	al = (regs->regs[35]) & 0xffffffff;
	bh = (regs->regs[36]) >> 32;
	bl = (regs->regs[36]) & 0xffffffff;
	ch = (regs->regs[37]) >> 32;
	cl = (regs->regs[37]) & 0xffffffff;
	printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[38]) >> 32;
	al = (regs->regs[38]) & 0xffffffff;
	bh = (regs->regs[39]) >> 32;
	bl = (regs->regs[39]) & 0xffffffff;
	ch = (regs->regs[40]) >> 32;
	cl = (regs->regs[40]) & 0xffffffff;
	printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[41]) >> 32;
	al = (regs->regs[41]) & 0xffffffff;
	bh = (regs->regs[42]) >> 32;
	bl = (regs->regs[42]) & 0xffffffff;
	ch = (regs->regs[43]) >> 32;
	cl = (regs->regs[43]) & 0xffffffff;
	printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[44]) >> 32;
	al = (regs->regs[44]) & 0xffffffff;
	bh = (regs->regs[45]) >> 32;
	bl = (regs->regs[45]) & 0xffffffff;
	ch = (regs->regs[46]) >> 32;
	cl = (regs->regs[46]) & 0xffffffff;
	printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[47]) >> 32;
	al = (regs->regs[47]) & 0xffffffff;
	bh = (regs->regs[48]) >> 32;
	bl = (regs->regs[48]) & 0xffffffff;
	ch = (regs->regs[49]) >> 32;
	cl = (regs->regs[49]) & 0xffffffff;
	printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[50]) >> 32;
	al = (regs->regs[50]) & 0xffffffff;
	bh = (regs->regs[51]) >> 32;
	bl = (regs->regs[51]) & 0xffffffff;
	ch = (regs->regs[52]) >> 32;
	cl = (regs->regs[52]) & 0xffffffff;
	printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[53]) >> 32;
	al = (regs->regs[53]) & 0xffffffff;
	bh = (regs->regs[54]) >> 32;
	bl = (regs->regs[54]) & 0xffffffff;
	ch = (regs->regs[55]) >> 32;
	cl = (regs->regs[55]) & 0xffffffff;
	printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[56]) >> 32;
	al = (regs->regs[56]) & 0xffffffff;
	bh = (regs->regs[57]) >> 32;
	bl = (regs->regs[57]) & 0xffffffff;
	ch = (regs->regs[58]) >> 32;
	cl = (regs->regs[58]) & 0xffffffff;
	printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[59]) >> 32;
	al = (regs->regs[59]) & 0xffffffff;
	bh = (regs->regs[60]) >> 32;
	bl = (regs->regs[60]) & 0xffffffff;
	ch = (regs->regs[61]) >> 32;
	cl = (regs->regs[61]) & 0xffffffff;
	printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[62]) >> 32;
	al = (regs->regs[62]) & 0xffffffff;
	bh = (regs->tregs[0]) >> 32;
	bl = (regs->tregs[0]) & 0xffffffff;
	ch = (regs->tregs[1]) >> 32;
	cl = (regs->tregs[1]) & 0xffffffff;
	printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->tregs[2]) >> 32;
	al = (regs->tregs[2]) & 0xffffffff;
	bh = (regs->tregs[3]) >> 32;
	bl = (regs->tregs[3]) & 0xffffffff;
	ch = (regs->tregs[4]) >> 32;
	cl = (regs->tregs[4]) & 0xffffffff;
	printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->tregs[5]) >> 32;
	al = (regs->tregs[5]) & 0xffffffff;
	bh = (regs->tregs[6]) >> 32;
	bl = (regs->tregs[6]) & 0xffffffff;
	ch = (regs->tregs[7]) >> 32;
	cl = (regs->tregs[7]) & 0xffffffff;
	printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	/*
	 * If we're in kernel mode, dump the stack too..
	 */
	if (!user_mode(regs)) {
		void show_stack(struct task_struct *tsk, unsigned long *sp);
		unsigned long sp = regs->regs[15] & 0xffffffff;
		struct task_struct *tsk = get_current();

		tsk->thread.kregs = regs;

		show_stack(tsk, (unsigned long *)sp);
	}
}

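/*
 * show_regs() above prints every 64-bit quantity as two 32-bit halves
 * (%08Lx%08Lx of the value shifted right by 32 and the value masked
 * with 0xffffffff), since a single 64-bit register does not fit the
 * usual 8-digit column.  TEA and KCR0 are control registers and are
 * read with "getcon" rather than taken from the pt_regs frame.
 */
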
struct task_struct * alloc_task_struct(void)
{
	/* Get task descriptor pages */
	return (struct task_struct *)
		__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE));
}

void free_task_struct(struct task_struct *p)
{
	free_pages((unsigned long) p, get_order(THREAD_SIZE));
}

/*
 * Create a kernel thread
 */
ATTRIB_NORET void kernel_thread_helper(void *arg, int (*fn)(void *))
{
	do_exit(fn(arg));
}

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
 * who haven't done an "execve()") should use this: it will work within
 * a system call from a "real" process, but the process memory space will
 * not be freed until both the parent and the child have exited.
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	struct pt_regs regs;
	int pid;

	memset(&regs, 0, sizeof(regs));
	regs.regs[2] = (unsigned long)arg;
	regs.regs[3] = (unsigned long)fn;

	regs.pc = (unsigned long)kernel_thread_helper;
	regs.sr = (1 << 30);

	/* Ok, create the new process.. */
	pid = do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0,
		      &regs, 0, NULL, NULL);

	trace_mark(kernel_arch_kthread_create, "pid %d fn %p", pid, fn);

	return pid;
}

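/*
 * A minimal usage sketch (the caller, thread function and clone flags
 * below are hypothetical, not part of this file):
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		... do work ...
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 *
 * kernel_thread_helper() invokes fn(arg) and hands the return value to
 * do_exit(), so the function's return value becomes the thread's exit
 * code.
 */
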
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/*
	 * See arch/sparc/kernel/process.c for the precedent for doing
	 * this -- RPC.
	 *
	 * The SH-5 FPU save/restore approach relies on
	 * last_task_used_math pointing to a live task_struct. When
	 * another task tries to use the FPU for the 1st time, the FPUDIS
	 * trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
	 * existing FPU state to the FP regs field within
	 * last_task_used_math before re-loading the new task's FPU state
	 * (or initialising it if the FPU has been used before). So if
	 * last_task_used_math is stale, and its page has already been
	 * re-allocated for another use, the consequences are rather
	 * grim. Unless we null it here, there is no other path through
	 * which it would get safely nulled.
	 */
#ifdef CONFIG_SH_FPU
	if (last_task_used_math == current) {
		last_task_used_math = NULL;
	}
#endif
}

void flush_thread(void)
{
	/* Called by fs/exec.c (flush_old_exec) to remove traces of a
	 * previously running executable. */
#ifdef CONFIG_SH_FPU
	if (last_task_used_math == current) {
		last_task_used_math = NULL;
	}
	/* Force FPU state to be reinitialised after exec */
	clear_used_math();
#endif

	/* if we are a kernel thread, about to change to user thread,
	 * update kreg
	 */
	if(current->thread.kregs==&fake_swapper_regs) {
		current->thread.kregs =
			((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
		current->thread.uregs = current->thread.kregs;
	}
}

void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
#ifdef CONFIG_SH_FPU
	int fpvalid;
	struct task_struct *tsk = current;

	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		if (current == last_task_used_math) {
			enable_fpu();
			save_fpu(tsk, regs);
			disable_fpu();
			last_task_used_math = 0;
			regs->sr |= SR_FD;
		}

		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}

	return fpvalid;
#else
	return 0; /* Task didn't use the fpu at all. */
#endif
}

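/*
 * dump_fpu() relies on the lazy FPU switching scheme described in
 * exit_thread() above: if the dumped task still owns the FPU
 * (last_task_used_math == current), its live FPU state is first flushed
 * into tsk->thread.fpu.hard via save_fpu(), ownership is dropped and
 * SR_FD is set so the next FPU use re-faults; only then is the saved
 * state copied out for the core dump.
 */
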
asmlinkage void ret_from_fork(void);

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long long se;	/* Sign extension */

#ifdef CONFIG_SH_FPU
	if(last_task_used_math == current) {
		enable_fpu();
		save_fpu(current, regs);
		disable_fpu();
		last_task_used_math = NULL;
		regs->sr |= SR_FD;
	}
#endif
	/* Copy from sh version */
	childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;

	*childregs = *regs;

	if (user_mode(regs)) {
		childregs->regs[15] = usp;
		p->thread.uregs = childregs;
	} else {
		childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	}

	childregs->regs[9] = 0; /* Set return value for child */
	childregs->sr |= SR_FD; /* Invalidate FPU flag */

	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	/*
	 * Sign extend the edited stack.
	 * Note that thread.pc and thread.sp will stay
	 * 32-bit wide and context switch must take care
	 * of NEFF sign extension.
	 */

	se = childregs->regs[15];
	se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se;
	childregs->regs[15] = se;

	return 0;
}

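/*
 * Worked example of the sign extension above, assuming a 32-bit
 * effective address space (NEFF == 32, so NEFF_SIGN is bit 31 and
 * NEFF_MASK covers bits 63..32): a stack pointer of 0x80001000 has the
 * sign bit set and becomes 0xffffffff80001000, while 0x10001000 is
 * left unchanged.
 */
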
asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
			unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs *pregs)
{
	return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
}

asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs *pregs)
{
	if (!newsp)
		newsp = pregs->regs[15];
	return do_fork(clone_flags, newsp, pregs, 0, 0, 0);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
			 unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs *pregs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0);
}

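/*
 * In all three variants above the caller's saved stack pointer
 * (pregs->regs[15]) is passed to do_fork() as the child's initial stack
 * address; sys_clone() additionally lets userspace supply its own stack
 * via newsp and only falls back to the parent's SP when newsp is 0.
 */
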
/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char *ufilename, char **uargv,
			  char **uenvp, unsigned long r5,
			  unsigned long r6, unsigned long r7,
			  struct pt_regs *pregs)
{
	int error;
	char *filename;

	lock_kernel();
	filename = getname((char __user *)ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename,
			  (char __user * __user *)uargv,
			  (char __user * __user *)uenvp,
			  pregs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	unlock_kernel();
	return error;
}

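/*
 * sys_execve() follows the usual pattern: copy the filename in with
 * getname(), hand argv/envp to the generic do_execve(), and release
 * the name with putname().  On success the PT_DTRACE flag (delayed
 * single-step trace state) is cleared under task_lock(); the whole
 * path still runs under the big kernel lock here.
 */
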
/*
 * These bracket the sleeping functions..
 */
extern void interruptible_sleep_on(wait_queue_head_t *q);

#define mid_sched	((unsigned long) interruptible_sleep_on)

#ifdef CONFIG_FRAME_POINTER
static int in_sh64_switch_to(unsigned long pc)
{
	extern char __sh64_switch_to_end;
	/* For a sleeping task, the PC is somewhere in the middle of the function,
	   so we don't have to worry about masking the LSB off */
	return (pc >= (unsigned long) sh64_switch_to) &&
	       (pc < (unsigned long) &__sh64_switch_to_end);
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(p);

#ifdef CONFIG_FRAME_POINTER
	if (in_sh64_switch_to(pc)) {
		unsigned long schedule_fp;
		unsigned long sh64_switch_to_fp;
		unsigned long schedule_caller_pc;

		sh64_switch_to_fp = (long) p->thread.sp;
		/* r14 is saved at offset 4 in the sh64_switch_to frame */
		schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);

		/* and the caller of 'schedule' is (currently!) saved at offset 24
		   in the frame of schedule (from disasm) */
		schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
		return schedule_caller_pc;
	}
#endif

	return pc;
}

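/*
 * Sketch of the frame walk above (offsets are the ones stated in the
 * comments, derived from disassembly and therefore fragile):
 *
 *	p->thread.sp             -> sh64_switch_to() frame
 *	sh64_switch_to frame +4  -> saved r14, i.e. schedule()'s frame
 *	schedule() frame     +24 -> return address of schedule()'s caller
 *
 * That caller's PC is what get_wchan() reports as the wait channel.
 */
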
/* Provide a /proc/asids file that lists out the
   ASIDs currently associated with the processes.  (If the DM.PC register is
   examined through the debug link, this shows ASID + PC.  To make use of this,
   the PID->ASID relationship needs to be known.  This is primarily for
   debugging.)
   */

#if defined(CONFIG_SH64_PROC_ASIDS)
static int
asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data)
{
	int len=0;
	struct task_struct *p;
	read_lock(&tasklist_lock);
	for_each_process(p) {
		int pid = p->pid;

		if (!pid)
			continue;
		if (p->mm)
			len += sprintf(buf+len, "%5d : %02lx\n", pid,
				       asid_cache(smp_processor_id()));
		else
			len += sprintf(buf+len, "%5d : (none)\n", pid);
	}
	read_unlock(&tasklist_lock);
	*eof = 1;
	return len;
}

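/*
 * The resulting /proc/asids output is one line per process, e.g.
 * (values illustrative):
 *
 *	  100 : 03
 *	  101 : (none)
 *
 * where the hex column comes from the "%02lx" ASID format and "(none)"
 * marks kernel threads that have no mm.
 */
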
static int __init register_proc_asids(void)
{
	create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL);
	return 0;
}
__initcall(register_proc_asids);
#endif