/*
 * security/yama/yama_lsm.c
 * (snapshot: blob dcd617829fae54bcc0f46d1043f56d4db6bde27c, linux/fpc-iii.git;
 *  an unrelated commit subject line from the scrape has been removed)
 */
/*
 * Yama Linux Security Module
 *
 * Author: Kees Cook <keescook@chromium.org>
 *
 * Copyright (C) 2010 Canonical, Ltd.
 * Copyright (C) 2011 The Chromium OS Authors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 */
15 #include <linux/security.h>
16 #include <linux/sysctl.h>
17 #include <linux/ptrace.h>
18 #include <linux/prctl.h>
19 #include <linux/ratelimit.h>
21 #define YAMA_SCOPE_DISABLED 0
22 #define YAMA_SCOPE_RELATIONAL 1
23 #define YAMA_SCOPE_CAPABILITY 2
24 #define YAMA_SCOPE_NO_ATTACH 3
26 static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
28 /* describe a ptrace relationship for potential exception */
29 struct ptrace_relation {
30 struct task_struct *tracer;
31 struct task_struct *tracee;
32 struct list_head node;
35 static LIST_HEAD(ptracer_relations);
36 static DEFINE_SPINLOCK(ptracer_relations_lock);
38 /**
39 * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
40 * @tracer: the task_struct of the process doing the ptrace
41 * @tracee: the task_struct of the process to be ptraced
43 * Each tracee can have, at most, one tracer registered. Each time this
44 * is called, the prior registered tracer will be replaced for the tracee.
46 * Returns 0 if relationship was added, -ve on error.
48 static int yama_ptracer_add(struct task_struct *tracer,
49 struct task_struct *tracee)
51 int rc = 0;
52 struct ptrace_relation *added;
53 struct ptrace_relation *entry, *relation = NULL;
55 added = kmalloc(sizeof(*added), GFP_KERNEL);
56 if (!added)
57 return -ENOMEM;
59 spin_lock_bh(&ptracer_relations_lock);
60 list_for_each_entry(entry, &ptracer_relations, node)
61 if (entry->tracee == tracee) {
62 relation = entry;
63 break;
65 if (!relation) {
66 relation = added;
67 relation->tracee = tracee;
68 list_add(&relation->node, &ptracer_relations);
70 relation->tracer = tracer;
72 spin_unlock_bh(&ptracer_relations_lock);
73 if (added != relation)
74 kfree(added);
76 return rc;
79 /**
80 * yama_ptracer_del - remove exceptions related to the given tasks
81 * @tracer: remove any relation where tracer task matches
82 * @tracee: remove any relation where tracee task matches
84 static void yama_ptracer_del(struct task_struct *tracer,
85 struct task_struct *tracee)
87 struct ptrace_relation *relation, *safe;
89 spin_lock_bh(&ptracer_relations_lock);
90 list_for_each_entry_safe(relation, safe, &ptracer_relations, node)
91 if (relation->tracee == tracee ||
92 (tracer && relation->tracer == tracer)) {
93 list_del(&relation->node);
94 kfree(relation);
96 spin_unlock_bh(&ptracer_relations_lock);
/**
 * yama_task_free - check for task_pid to remove from exception list
 * @task: task being removed
 */
static void yama_task_free(struct task_struct *task)
{
	/* Purge every exception in which @task was either side. */
	yama_ptracer_del(task, task);
}
109 * yama_task_prctl - check for Yama-specific prctl operations
110 * @option: operation
111 * @arg2: argument
112 * @arg3: argument
113 * @arg4: argument
114 * @arg5: argument
116 * Return 0 on success, -ve on error. -ENOSYS is returned when Yama
117 * does not handle the given option.
119 static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
120 unsigned long arg4, unsigned long arg5)
122 int rc;
123 struct task_struct *myself = current;
125 rc = cap_task_prctl(option, arg2, arg3, arg4, arg5);
126 if (rc != -ENOSYS)
127 return rc;
129 switch (option) {
130 case PR_SET_PTRACER:
131 /* Since a thread can call prctl(), find the group leader
132 * before calling _add() or _del() on it, since we want
133 * process-level granularity of control. The tracer group
134 * leader checking is handled later when walking the ancestry
135 * at the time of PTRACE_ATTACH check.
137 rcu_read_lock();
138 if (!thread_group_leader(myself))
139 myself = rcu_dereference(myself->group_leader);
140 get_task_struct(myself);
141 rcu_read_unlock();
143 if (arg2 == 0) {
144 yama_ptracer_del(NULL, myself);
145 rc = 0;
146 } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
147 rc = yama_ptracer_add(NULL, myself);
148 } else {
149 struct task_struct *tracer;
151 rcu_read_lock();
152 tracer = find_task_by_vpid(arg2);
153 if (tracer)
154 get_task_struct(tracer);
155 else
156 rc = -EINVAL;
157 rcu_read_unlock();
159 if (tracer) {
160 rc = yama_ptracer_add(tracer, myself);
161 put_task_struct(tracer);
165 put_task_struct(myself);
166 break;
169 return rc;
173 * task_is_descendant - walk up a process family tree looking for a match
174 * @parent: the process to compare against while walking up from child
175 * @child: the process to start from while looking upwards for parent
177 * Returns 1 if child is a descendant of parent, 0 if not.
179 static int task_is_descendant(struct task_struct *parent,
180 struct task_struct *child)
182 int rc = 0;
183 struct task_struct *walker = child;
185 if (!parent || !child)
186 return 0;
188 rcu_read_lock();
189 if (!thread_group_leader(parent))
190 parent = rcu_dereference(parent->group_leader);
191 while (walker->pid > 0) {
192 if (!thread_group_leader(walker))
193 walker = rcu_dereference(walker->group_leader);
194 if (walker == parent) {
195 rc = 1;
196 break;
198 walker = rcu_dereference(walker->real_parent);
200 rcu_read_unlock();
202 return rc;
206 * ptracer_exception_found - tracer registered as exception for this tracee
207 * @tracer: the task_struct of the process attempting ptrace
208 * @tracee: the task_struct of the process to be ptraced
210 * Returns 1 if tracer has is ptracer exception ancestor for tracee.
212 static int ptracer_exception_found(struct task_struct *tracer,
213 struct task_struct *tracee)
215 int rc = 0;
216 struct ptrace_relation *relation;
217 struct task_struct *parent = NULL;
218 bool found = false;
220 spin_lock_bh(&ptracer_relations_lock);
221 rcu_read_lock();
222 if (!thread_group_leader(tracee))
223 tracee = rcu_dereference(tracee->group_leader);
224 list_for_each_entry(relation, &ptracer_relations, node)
225 if (relation->tracee == tracee) {
226 parent = relation->tracer;
227 found = true;
228 break;
231 if (found && (parent == NULL || task_is_descendant(parent, tracer)))
232 rc = 1;
233 rcu_read_unlock();
234 spin_unlock_bh(&ptracer_relations_lock);
236 return rc;
240 * yama_ptrace_access_check - validate PTRACE_ATTACH calls
241 * @child: task that current task is attempting to ptrace
242 * @mode: ptrace attach mode
244 * Returns 0 if following the ptrace is allowed, -ve on error.
246 static int yama_ptrace_access_check(struct task_struct *child,
247 unsigned int mode)
249 int rc;
251 /* If standard caps disallows it, so does Yama. We should
252 * only tighten restrictions further.
254 rc = cap_ptrace_access_check(child, mode);
255 if (rc)
256 return rc;
258 /* require ptrace target be a child of ptracer on attach */
259 if (mode == PTRACE_MODE_ATTACH) {
260 switch (ptrace_scope) {
261 case YAMA_SCOPE_DISABLED:
262 /* No additional restrictions. */
263 break;
264 case YAMA_SCOPE_RELATIONAL:
265 if (!task_is_descendant(current, child) &&
266 !ptracer_exception_found(current, child) &&
267 !ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
268 rc = -EPERM;
269 break;
270 case YAMA_SCOPE_CAPABILITY:
271 if (!ns_capable(task_user_ns(child), CAP_SYS_PTRACE))
272 rc = -EPERM;
273 break;
274 case YAMA_SCOPE_NO_ATTACH:
275 default:
276 rc = -EPERM;
277 break;
281 if (rc) {
282 printk_ratelimited(KERN_NOTICE
283 "ptrace of pid %d was attempted by: %s (pid %d)\n",
284 child->pid, current->comm, current->pid);
287 return rc;
291 * yama_ptrace_traceme - validate PTRACE_TRACEME calls
292 * @parent: task that will become the ptracer of the current task
294 * Returns 0 if following the ptrace is allowed, -ve on error.
296 static int yama_ptrace_traceme(struct task_struct *parent)
298 int rc;
300 /* If standard caps disallows it, so does Yama. We should
301 * only tighten restrictions further.
303 rc = cap_ptrace_traceme(parent);
304 if (rc)
305 return rc;
307 /* Only disallow PTRACE_TRACEME on more aggressive settings. */
308 switch (ptrace_scope) {
309 case YAMA_SCOPE_CAPABILITY:
310 if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE))
311 rc = -EPERM;
312 break;
313 case YAMA_SCOPE_NO_ATTACH:
314 rc = -EPERM;
315 break;
318 if (rc) {
319 printk_ratelimited(KERN_NOTICE
320 "ptraceme of pid %d was attempted by: %s (pid %d)\n",
321 current->pid, parent->comm, parent->pid);
324 return rc;
327 static struct security_operations yama_ops = {
328 .name = "yama",
330 .ptrace_access_check = yama_ptrace_access_check,
331 .ptrace_traceme = yama_ptrace_traceme,
332 .task_prctl = yama_task_prctl,
333 .task_free = yama_task_free,
336 #ifdef CONFIG_SYSCTL
337 static int yama_dointvec_minmax(struct ctl_table *table, int write,
338 void __user *buffer, size_t *lenp, loff_t *ppos)
340 int rc;
342 if (write && !capable(CAP_SYS_PTRACE))
343 return -EPERM;
345 rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
346 if (rc)
347 return rc;
349 /* Lock the max value if it ever gets set. */
350 if (write && *(int *)table->data == *(int *)table->extra2)
351 table->extra1 = table->extra2;
353 return rc;
356 static int zero;
357 static int max_scope = YAMA_SCOPE_NO_ATTACH;
359 struct ctl_path yama_sysctl_path[] = {
360 { .procname = "kernel", },
361 { .procname = "yama", },
365 static struct ctl_table yama_sysctl_table[] = {
367 .procname = "ptrace_scope",
368 .data = &ptrace_scope,
369 .maxlen = sizeof(int),
370 .mode = 0644,
371 .proc_handler = yama_dointvec_minmax,
372 .extra1 = &zero,
373 .extra2 = &max_scope,
377 #endif /* CONFIG_SYSCTL */
379 static __init int yama_init(void)
381 if (!security_module_enable(&yama_ops))
382 return 0;
384 printk(KERN_INFO "Yama: becoming mindful.\n");
386 if (register_security(&yama_ops))
387 panic("Yama: kernel registration failed.\n");
389 #ifdef CONFIG_SYSCTL
390 if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
391 panic("Yama: sysctl registration failed.\n");
392 #endif
394 return 0;
397 security_initcall(yama_init);