/*
 * Yama Linux Security Module
 *
 * Author: Kees Cook <keescook@chromium.org>
 *
 * Copyright (C) 2010 Canonical, Ltd.
 * Copyright (C) 2011 The Chromium OS Authors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/lsm_hooks.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/string_helpers.h>
#include <linux/task_work.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#define YAMA_SCOPE_DISABLED     0
#define YAMA_SCOPE_RELATIONAL   1
#define YAMA_SCOPE_CAPABILITY   2
#define YAMA_SCOPE_NO_ATTACH    3

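/*
 * For reference, the sysctl value stored in ptrace_scope below selects one
 * of these enforcement levels (see Documentation/admin-guide/LSM/Yama.rst):
 *
 *   0 (DISABLED):   no restrictions beyond the classic ptrace permission
 *                   checks.
 *   1 (RELATIONAL): PTRACE_ATTACH requires the tracer to be an ancestor of
 *                   the tracee, to hold CAP_SYS_PTRACE, or to have been
 *                   declared by the tracee via prctl(PR_SET_PTRACER).
 *   2 (CAPABILITY): only a tracer with CAP_SYS_PTRACE may attach.
 *   3 (NO_ATTACH):  ptrace attach is denied entirely; once selected, the
 *                   sysctl cannot be lowered (see yama_dointvec_minmax()).
 */
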
static int ptrace_scope = YAMA_SCOPE_RELATIONAL;

/* describe a ptrace relationship for potential exception */
struct ptrace_relation {
        struct task_struct *tracer;
        struct task_struct *tracee;
        bool invalid;
        struct list_head node;
        struct rcu_head rcu;
};

static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);

static void yama_relation_cleanup(struct work_struct *work);
static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);

struct access_report_info {
        struct callback_head work;
        const char *access;
        struct task_struct *target;
        struct task_struct *agent;
};

static void __report_access(struct callback_head *work)
{
        struct access_report_info *info =
                container_of(work, struct access_report_info, work);
        char *target_cmd, *agent_cmd;

        target_cmd = kstrdup_quotable_cmdline(info->target, GFP_KERNEL);
        agent_cmd = kstrdup_quotable_cmdline(info->agent, GFP_KERNEL);

        pr_notice_ratelimited(
                "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
                info->access, target_cmd, info->target->pid, agent_cmd,
                info->agent->pid);

        kfree(agent_cmd);
        kfree(target_cmd);

        put_task_struct(info->agent);
        put_task_struct(info->target);
        kfree(info);
}

/* defers execution because cmdline access can sleep */
static void report_access(const char *access, struct task_struct *target,
                          struct task_struct *agent)
{
        struct access_report_info *info;
        char agent_comm[sizeof(agent->comm)];

        assert_spin_locked(&target->alloc_lock); /* for target->comm */

        if (current->flags & PF_KTHREAD) {
                /* I don't think kthreads call task_work_run() before exiting.
                 * Imagine angry ranting about procfs here.
                 */
                pr_notice_ratelimited(
                        "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
                        access, target->comm, target->pid,
                        get_task_comm(agent_comm, agent), agent->pid);
                return;
        }

        info = kmalloc(sizeof(*info), GFP_ATOMIC);
        if (!info)
                return;
        init_task_work(&info->work, __report_access);
        get_task_struct(target);
        get_task_struct(agent);
        info->access = access;
        info->target = target;
        info->agent = agent;
        if (task_work_add(current, &info->work, true) == 0)
                return; /* success */

        WARN(1, "report_access called from exiting task");
        put_task_struct(target);
        put_task_struct(agent);
        kfree(info);
}

/**
 * yama_relation_cleanup - remove invalid entries from the relation list
 *
 */
static void yama_relation_cleanup(struct work_struct *work)
{
        struct ptrace_relation *relation;

        spin_lock(&ptracer_relations_lock);
        rcu_read_lock();
        list_for_each_entry_rcu(relation, &ptracer_relations, node) {
                if (relation->invalid) {
                        list_del_rcu(&relation->node);
                        kfree_rcu(relation, rcu);
                }
        }
        rcu_read_unlock();
        spin_unlock(&ptracer_relations_lock);
}

/**
 * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
 * @tracer: the task_struct of the process doing the ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Each tracee can have, at most, one tracer registered. Each time this
 * is called, the prior registered tracer will be replaced for the tracee.
 *
 * Returns 0 if relationship was added, -ve on error.
 */
static int yama_ptracer_add(struct task_struct *tracer,
                            struct task_struct *tracee)
{
        struct ptrace_relation *relation, *added;

        added = kmalloc(sizeof(*added), GFP_KERNEL);
        if (!added)
                return -ENOMEM;

        added->tracee = tracee;
        added->tracer = tracer;
        added->invalid = false;

        spin_lock(&ptracer_relations_lock);
        rcu_read_lock();
        list_for_each_entry_rcu(relation, &ptracer_relations, node) {
                if (relation->invalid)
                        continue;
                if (relation->tracee == tracee) {
                        list_replace_rcu(&relation->node, &added->node);
                        kfree_rcu(relation, rcu);
                        goto out;
                }
        }

        list_add_rcu(&added->node, &ptracer_relations);

out:
        rcu_read_unlock();
        spin_unlock(&ptracer_relations_lock);
        return 0;
}

/**
 * yama_ptracer_del - remove exceptions related to the given tasks
 * @tracer: remove any relation where tracer task matches
 * @tracee: remove any relation where tracee task matches
 */
static void yama_ptracer_del(struct task_struct *tracer,
                             struct task_struct *tracee)
{
        struct ptrace_relation *relation;
        bool marked = false;

        rcu_read_lock();
        list_for_each_entry_rcu(relation, &ptracer_relations, node) {
                if (relation->invalid)
                        continue;
                if (relation->tracee == tracee ||
                    (tracer && relation->tracer == tracer)) {
                        relation->invalid = true;
                        marked = true;
                }
        }
        rcu_read_unlock();

        if (marked)
                schedule_work(&yama_relation_work);
}

/**
 * yama_task_free - check for task_pid to remove from exception list
 * @task: task being removed
 */
void yama_task_free(struct task_struct *task)
{
        yama_ptracer_del(task, task);
}

/**
 * yama_task_prctl - check for Yama-specific prctl operations
 * @option: operation
 * @arg2: argument
 * @arg3: argument
 * @arg4: argument
 * @arg5: argument
 *
 * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
 * does not handle the given option.
 */
int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                    unsigned long arg4, unsigned long arg5)
{
        int rc = -ENOSYS;
        struct task_struct *myself = current;

        switch (option) {
        case PR_SET_PTRACER:
                /* Since a thread can call prctl(), find the group leader
                 * before calling _add() or _del() on it, since we want
                 * process-level granularity of control. The tracer group
                 * leader checking is handled later when walking the ancestry
                 * at the time of PTRACE_ATTACH check.
                 */
                rcu_read_lock();
                if (!thread_group_leader(myself))
                        myself = rcu_dereference(myself->group_leader);
                get_task_struct(myself);
                rcu_read_unlock();

                if (arg2 == 0) {
                        yama_ptracer_del(NULL, myself);
                        rc = 0;
                } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
                        rc = yama_ptracer_add(NULL, myself);
                } else {
                        struct task_struct *tracer;

                        tracer = find_get_task_by_vpid(arg2);
                        if (!tracer) {
                                rc = -EINVAL;
                        } else {
                                rc = yama_ptracer_add(tracer, myself);
                                put_task_struct(tracer);
                        }
                }

                put_task_struct(myself);
                break;
        }

        return rc;
}

/**
 * task_is_descendant - walk up a process family tree looking for a match
 * @parent: the process to compare against while walking up from child
 * @child: the process to start from while looking upwards for parent
 *
 * Returns 1 if child is a descendant of parent, 0 if not.
 */
static int task_is_descendant(struct task_struct *parent,
                              struct task_struct *child)
{
        int rc = 0;
        struct task_struct *walker = child;

        if (!parent || !child)
                return 0;

        rcu_read_lock();
        if (!thread_group_leader(parent))
                parent = rcu_dereference(parent->group_leader);
        while (walker->pid > 0) {
                if (!thread_group_leader(walker))
                        walker = rcu_dereference(walker->group_leader);
                if (walker == parent) {
                        rc = 1;
                        break;
                }
                walker = rcu_dereference(walker->real_parent);
        }
        rcu_read_unlock();

        return rc;
}

/**
 * ptracer_exception_found - tracer registered as exception for this tracee
 * @tracer: the task_struct of the process attempting ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Returns 1 if tracer has a ptracer exception ancestor for tracee.
 */
static int ptracer_exception_found(struct task_struct *tracer,
                                   struct task_struct *tracee)
{
        int rc = 0;
        struct ptrace_relation *relation;
        struct task_struct *parent = NULL;
        bool found = false;

        rcu_read_lock();

        /*
         * If there's already an active tracing relationship, then make an
         * exception for the sake of other accesses, like process_vm_rw().
         */
        parent = ptrace_parent(tracee);
        if (parent != NULL && same_thread_group(parent, tracer)) {
                rc = 1;
                goto unlock;
        }

        /* Look for a PR_SET_PTRACER relationship. */
        if (!thread_group_leader(tracee))
                tracee = rcu_dereference(tracee->group_leader);
        list_for_each_entry_rcu(relation, &ptracer_relations, node) {
                if (relation->invalid)
                        continue;
                if (relation->tracee == tracee) {
                        parent = relation->tracer;
                        found = true;
                        break;
                }
        }

        if (found && (parent == NULL || task_is_descendant(parent, tracer)))
                rc = 1;

unlock:
        rcu_read_unlock();

        return rc;
}

/**
 * yama_ptrace_access_check - validate PTRACE_ATTACH calls
 * @child: task that current task is attempting to ptrace
 * @mode: ptrace attach mode
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
static int yama_ptrace_access_check(struct task_struct *child,
                                    unsigned int mode)
{
        int rc = 0;

        /* require ptrace target be a child of ptracer on attach */
        if (mode & PTRACE_MODE_ATTACH) {
                switch (ptrace_scope) {
                case YAMA_SCOPE_DISABLED:
                        /* No additional restrictions. */
                        break;
                case YAMA_SCOPE_RELATIONAL:
                        rcu_read_lock();
                        if (!task_is_descendant(current, child) &&
                            !ptracer_exception_found(current, child) &&
                            !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
                                rc = -EPERM;
                        rcu_read_unlock();
                        break;
                case YAMA_SCOPE_CAPABILITY:
                        rcu_read_lock();
                        if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
                                rc = -EPERM;
                        rcu_read_unlock();
                        break;
                case YAMA_SCOPE_NO_ATTACH:
                default:
                        rc = -EPERM;
                        break;
                }
        }

        if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0)
                report_access("attach", child, current);

        return rc;
}

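/*
 * For reference: with ptrace_scope >= 1, a PTRACE_ATTACH that fails the
 * checks above returns -EPERM to the caller and (unless PTRACE_MODE_NOAUDIT
 * is set) report_access() emits a ratelimited log line of the form:
 *
 *      ptrace attach of "./target"[1234] was attempted by "gdb"[5678]
 *
 * (the command lines and pids shown are placeholders).
 */
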
/**
 * yama_ptrace_traceme - validate PTRACE_TRACEME calls
 * @parent: task that will become the ptracer of the current task
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_traceme(struct task_struct *parent)
{
        int rc = 0;

        /* Only disallow PTRACE_TRACEME on more aggressive settings. */
        switch (ptrace_scope) {
        case YAMA_SCOPE_CAPABILITY:
                if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
                        rc = -EPERM;
                break;
        case YAMA_SCOPE_NO_ATTACH:
                rc = -EPERM;
                break;
        }

        if (rc) {
                task_lock(current);
                report_access("traceme", current, parent);
                task_unlock(current);
        }

        return rc;
}

static struct security_hook_list yama_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(ptrace_access_check, yama_ptrace_access_check),
        LSM_HOOK_INIT(ptrace_traceme, yama_ptrace_traceme),
        LSM_HOOK_INIT(task_prctl, yama_task_prctl),
        LSM_HOOK_INIT(task_free, yama_task_free),
};

#ifdef CONFIG_SYSCTL
static int yama_dointvec_minmax(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        struct ctl_table table_copy;

        if (write && !capable(CAP_SYS_PTRACE))
                return -EPERM;

        /* Lock the max value if it ever gets set. */
        table_copy = *table;
        if (*(int *)table_copy.data == *(int *)table_copy.extra2)
                table_copy.extra1 = table_copy.extra2;

        return proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos);
}

static int zero;
static int max_scope = YAMA_SCOPE_NO_ATTACH;

struct ctl_path yama_sysctl_path[] = {
        { .procname = "kernel", },
        { .procname = "yama", },
        { }
};

static struct ctl_table yama_sysctl_table[] = {
        {
                .procname       = "ptrace_scope",
                .data           = &ptrace_scope,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = yama_dointvec_minmax,
                .extra1         = &zero,
                .extra2         = &max_scope,
        },
        { }
};

static void __init yama_init_sysctl(void)
{
        if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
                panic("Yama: sysctl registration failed.\n");
}
#else
static inline void yama_init_sysctl(void) { }
#endif /* CONFIG_SYSCTL */

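/*
 * For reference, the table above is exposed as
 * /proc/sys/kernel/yama/ptrace_scope.  Writes require CAP_SYS_PTRACE (see
 * yama_dointvec_minmax()), e.g. from a privileged shell:
 *
 *      sysctl -w kernel.yama.ptrace_scope=2
 *
 * and once the value reaches YAMA_SCOPE_NO_ATTACH it can no longer be
 * lowered.
 */
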
void __init yama_add_hooks(void)
{
        pr_info("Yama: becoming mindful.\n");
        security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks), "yama");
        yama_init_sysctl();
}