arch/hexagon/kernel/ptrace.c
/*
 * Ptrace support for Hexagon
 *
 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <generated/compile.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/user.h>

#include <asm/system.h>
#include <asm/user.h>
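
/*
 * The regset get/set handlers below back PTRACE_GETREGSET/PTRACE_SETREGSET
 * and the NT_PRSTATUS note in ELF core dumps: they marshal a thread's saved
 * pt_regs to and from the layout userspace sees as struct user_regs_struct.
 */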
static int genregs_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	unsigned int dummy;
	struct pt_regs *regs = task_pt_regs(target);

	if (!regs)
		return -EIO;

	/* The general idea here is that the copyout must happen in
	 * exactly the same order in which the userspace expects these
	 * regs. Now, the sequence in userspace does not match the
	 * sequence in the kernel, so everything past the 32 gprs
	 * happens one at a time.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &regs->r00, 0, 32*sizeof(unsigned long));
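
	/*
	 * The block copyout above relies on r0-r31 being stored
	 * contiguously from regs->r00 in pt_regs, and on the first
	 * 32 words of struct user_regs_struct being those same GPRs
	 * in the same order.
	 */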

#define ONEXT(KPT_REG, USR_REG) \
	if (!ret) \
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, \
			KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
			offsetof(struct user_regs_struct, USR_REG) + \
				sizeof(unsigned long));
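
/*
 * ONEXT copies one kernel-side register into the slot that struct
 * user_regs_struct reserves for USR_REG, but only if no earlier copy
 * failed.  For example, ONEXT(&regs->sa0, sa0) expands (roughly) to:
 *
 *	if (!ret)
 *		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 *			&regs->sa0,
 *			offsetof(struct user_regs_struct, sa0),
 *			offsetof(struct user_regs_struct, sa0) +
 *				sizeof(unsigned long));
 */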

	/* Must be exactly same sequence as struct user_regs_struct */
	ONEXT(&regs->sa0, sa0);
	ONEXT(&regs->lc0, lc0);
	ONEXT(&regs->sa1, sa1);
	ONEXT(&regs->lc1, lc1);
	ONEXT(&regs->m0, m0);
	ONEXT(&regs->m1, m1);
	ONEXT(&regs->usr, usr);
	ONEXT(&regs->preds, p3_0);
	ONEXT(&regs->gp, gp);
	ONEXT(&regs->ugp, ugp);
	ONEXT(&pt_elr(regs), pc);
	dummy = pt_cause(regs);
	ONEXT(&dummy, cause);
	ONEXT(&pt_badva(regs), badva);

	/* Pad the rest with zeros, if needed */
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					offsetof(struct user_regs_struct, pad1), -1);
	return ret;
}
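
/*
 * genregs_set is the mirror image of genregs_get: it copies a
 * userspace-supplied register image (e.g. from PTRACE_SETREGSET) back
 * into the thread's pt_regs, in the same struct user_regs_struct order.
 */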
static int genregs_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	unsigned long bucket;
	struct pt_regs *regs = task_pt_regs(target);

	if (!regs)
		return -EIO;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->r00, 0, 32*sizeof(unsigned long));

#define INEXT(KPT_REG, USR_REG) \
	if (!ret) \
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
			KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
			offsetof(struct user_regs_struct, USR_REG) + \
				sizeof(unsigned long));

	/* Must be exactly same sequence as struct user_regs_struct */
	INEXT(&regs->sa0, sa0);
	INEXT(&regs->lc0, lc0);
	INEXT(&regs->sa1, sa1);
	INEXT(&regs->lc1, lc1);
	INEXT(&regs->m0, m0);
	INEXT(&regs->m1, m1);
	INEXT(&regs->usr, usr);
	INEXT(&regs->preds, p3_0);
	INEXT(&regs->gp, gp);
	INEXT(&regs->ugp, ugp);
	INEXT(&pt_elr(regs), pc);

	/* CAUSE and BADVA aren't writeable. */
	INEXT(&bucket, cause);
	INEXT(&bucket, badva);

	/* Ignore the rest, if needed */
	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					offsetof(struct user_regs_struct, pad1), -1);

	if (ret)
		return ret;

	/*
	 * This is special; SP is actually restored by the VM via the
	 * special event record which is set by the special trap.
	 */
	regs->hvmer.vmpsp = regs->r29;
	return 0;
}

enum hexagon_regset {
	REGSET_GENERAL,
};

static const struct user_regset hexagon_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(unsigned long),
		.align = sizeof(unsigned long),
		.get = genregs_get,
		.set = genregs_set,
	},
};

static const struct user_regset_view hexagon_user_view = {
	.name = UTS_MACHINE,
	.e_machine = ELF_ARCH,
	.ei_osabi = ELF_OSABI,
	.regsets = hexagon_regsets,
	.n = ARRAY_SIZE(hexagon_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &hexagon_user_view;
}
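
/*
 * Illustrative only (not part of this file): a minimal userspace sketch
 * of how a debugger could pull the NT_PRSTATUS regset exposed above via
 * PTRACE_GETREGSET, assuming a Hexagon toolchain whose <asm/user.h>
 * provides struct user_regs_struct with a "pc" field:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *	#include <sys/wait.h>
 *	#include <elf.h>
 *	#include <stdio.h>
 *	#include <asm/user.h>
 *
 *	int dump_pc(pid_t pid)
 *	{
 *		struct user_regs_struct regs;
 *		struct iovec iov = {
 *			.iov_base = &regs,
 *			.iov_len  = sizeof(regs),
 *		};
 *
 *		if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) == -1)
 *			return -1;
 *		waitpid(pid, NULL, 0);
 *
 *		// The kernel fills iov in the order genregs_get emits.
 *		if (ptrace(PTRACE_GETREGSET, pid,
 *			   (void *)NT_PRSTATUS, &iov) == -1)
 *			return -1;
 *
 *		printf("pc = 0x%lx\n", (unsigned long)regs.pc);
 *		return ptrace(PTRACE_DETACH, pid, NULL, NULL);
 *	}
 */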

void ptrace_disable(struct task_struct *child)
{
	/* Boilerplate - resolves to null inline if no HW single-step */
	user_disable_single_step(child);
}
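
/*
 * No Hexagon-specific ptrace requests are handled here; everything is
 * delegated to the generic ptrace_request(), which covers the regset
 * operations wired up above.
 */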
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}