/*
 * Ptrace support for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <generated/compile.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>
#include <linux/elf.h>

#include <asm/user.h>

#if arch_has_single_step()
/* Both called from ptrace_resume */
void user_enable_single_step(struct task_struct *child)
{
	pt_set_singlestep(task_pt_regs(child));
	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *child)
{
	pt_clr_singlestep(task_pt_regs(child));
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
#endif

static int genregs_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	unsigned long dummy;
	struct pt_regs *regs = task_pt_regs(target);

	if (!regs)
		return -EIO;

	/* The general idea here is that the copyout must happen in
	 * exactly the same order in which the userspace expects these
	 * regs. Now, the sequence in userspace does not match the
	 * sequence in the kernel, so everything past the 32 gprs
	 * happens one at a time.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &regs->r00, 0, 32*sizeof(unsigned long));

#define ONEXT(KPT_REG, USR_REG) \
	if (!ret) \
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, \
			KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
			offsetof(struct user_regs_struct, USR_REG) + \
			sizeof(unsigned long));

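/*
 * For example, ONEXT(&regs->sa0, sa0) copies one word from the kernel's
 * pt_regs into the slot that 'sa0' occupies in the userspace
 * user_regs_struct; the if (!ret) guard preserves the first error seen
 * by an earlier copyout.
 */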
	/* Must be exactly same sequence as struct user_regs_struct */
	ONEXT(&regs->sa0, sa0);
	ONEXT(&regs->lc0, lc0);
	ONEXT(&regs->sa1, sa1);
	ONEXT(&regs->lc1, lc1);
	ONEXT(&regs->m0, m0);
	ONEXT(&regs->m1, m1);
	ONEXT(&regs->usr, usr);
	ONEXT(&regs->preds, p3_0);
	ONEXT(&regs->gp, gp);
	ONEXT(&regs->ugp, ugp);
	ONEXT(&pt_elr(regs), pc);
	dummy = pt_cause(regs);
	ONEXT(&dummy, cause);
	ONEXT(&pt_badva(regs), badva);
#if CONFIG_HEXAGON_ARCH_VERSION >= 4
	ONEXT(&regs->cs0, cs0);
	ONEXT(&regs->cs1, cs1);
#endif

	/* Pad the rest with zeros, if needed */
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				offsetof(struct user_regs_struct, pad1), -1);

	return ret;
}

static int genregs_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long bucket;
	struct pt_regs *regs = task_pt_regs(target);

	if (!regs)
		return -EIO;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->r00, 0, 32*sizeof(unsigned long));

#define INEXT(KPT_REG, USR_REG) \
	if (!ret) \
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
			KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
			offsetof(struct user_regs_struct, USR_REG) + \
			sizeof(unsigned long));

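/*
 * INEXT() mirrors ONEXT() above: it copies one word for the named
 * user_regs_struct field from the tracer's buffer back into the
 * kernel-side location, again keeping the first error in 'ret'.
 */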
	/* Must be exactly same sequence as struct user_regs_struct */
	INEXT(&regs->sa0, sa0);
	INEXT(&regs->lc0, lc0);
	INEXT(&regs->sa1, sa1);
	INEXT(&regs->lc1, lc1);
	INEXT(&regs->m0, m0);
	INEXT(&regs->m1, m1);
	INEXT(&regs->usr, usr);
	INEXT(&regs->preds, p3_0);
	INEXT(&regs->gp, gp);
	INEXT(&regs->ugp, ugp);
	INEXT(&pt_elr(regs), pc);

	/* CAUSE and BADVA aren't writeable. */
	INEXT(&bucket, cause);
	INEXT(&bucket, badva);

#if CONFIG_HEXAGON_ARCH_VERSION >= 4
	INEXT(&regs->cs0, cs0);
	INEXT(&regs->cs1, cs1);
#endif

	/* Ignore the rest, if needed */
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
				offsetof(struct user_regs_struct, pad1), -1);

	if (ret)
		return ret;

	/*
	 * This is special; SP is actually restored by the VM via the
	 * special event record which is set by the special trap.
	 */
	regs->hvmer.vmpsp = regs->r29;
	return 0;
}

enum hexagon_regset {
	REGSET_GENERAL,
};

static const struct user_regset hexagon_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(unsigned long),
		.align = sizeof(unsigned long),
		.get = genregs_get,
		.set = genregs_set,
	},
};

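/*
 * A single regset view covers every Hexagon task; the ELF core-dump
 * code and the PTRACE_GETREGSET/PTRACE_SETREGSET requests use it to
 * find the general-register set above.
 */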
static const struct user_regset_view hexagon_user_view = {
	.name = UTS_MACHINE,
	.e_machine = ELF_ARCH,
	.ei_osabi = ELF_OSABI,
	.regsets = hexagon_regsets,
	.e_flags = ELF_CORE_EFLAGS,
	.n = ARRAY_SIZE(hexagon_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &hexagon_user_view;
}

void ptrace_disable(struct task_struct *child)
{
	/* Boilerplate - resolves to null inline if no HW single-step */
	user_disable_single_step(child);
}

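/*
 * All requests fall through to the generic ptrace_request(); regset
 * reads and writes reach genregs_get()/genregs_set() above through it.
 * Illustrative tracer-side usage (not part of this file):
 *
 *	struct user_regs_struct gregs;
 *	struct iovec iov = { .iov_base = &gregs, .iov_len = sizeof(gregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */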
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}