/*
 * cn_proc.c - process events connector
 *
 * Copyright (C) Matt Helsley, IBM Corp. 2005
 * Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net>
 * Original copyright notice follows:
 * Copyright (C) 2005 BULL SA.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/init.h>
#include <linux/connector.h>
#include <linux/gfp.h>
#include <linux/ptrace.h>
#include <linux/atomic.h>
#include <linux/pid_namespace.h>

#include <asm/unaligned.h>

#include <linux/cn_proc.h>
#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };

/* proc_event_counts is used as the sequence number of the netlink message */
static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
static inline void get_seq(__u32 *ts, int *cpu)
{
	/* Keep the per-cpu counter and the reported CPU id consistent. */
	preempt_disable();
	*ts = __this_cpu_inc_return(proc_event_counts) - 1;
	*cpu = smp_processor_id();
	preempt_enable();
}
void proc_fork_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;
	struct task_struct *parent;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_FORK;
	rcu_read_lock();
	parent = rcu_dereference(task->real_parent);
	ev->event_data.fork.parent_pid = parent->pid;
	ev->event_data.fork.parent_tgid = parent->tgid;
	rcu_read_unlock();
	ev->event_data.fork.child_pid = task->pid;
	ev->event_data.fork.child_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	/* If cn_netlink_send() failed, the data is not sent */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_exec_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_EXEC;
	ev->event_data.exec.process_pid = task->pid;
	ev->event_data.exec.process_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_id_connector(struct task_struct *task, int which_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;
	const struct cred *cred;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	ev->what = which_id;
	ev->event_data.id.process_pid = task->pid;
	ev->event_data.id.process_tgid = task->tgid;
	rcu_read_lock();
	cred = __task_cred(task);
	if (which_id == PROC_EVENT_UID) {
		ev->event_data.id.r.ruid = from_kuid_munged(&init_user_ns, cred->uid);
		ev->event_data.id.e.euid = from_kuid_munged(&init_user_ns, cred->euid);
	} else if (which_id == PROC_EVENT_GID) {
		ev->event_data.id.r.rgid = from_kgid_munged(&init_user_ns, cred->gid);
		ev->event_data.id.e.egid = from_kgid_munged(&init_user_ns, cred->egid);
	} else {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_sid_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_SID;
	ev->event_data.sid.process_pid = task->pid;
	ev->event_data.sid.process_tgid = task->tgid;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_PTRACE;
	ev->event_data.ptrace.process_pid = task->pid;
	ev->event_data.ptrace.process_tgid = task->tgid;
	if (ptrace_id == PTRACE_ATTACH) {
		ev->event_data.ptrace.tracer_pid = current->pid;
		ev->event_data.ptrace.tracer_tgid = current->tgid;
	} else if (ptrace_id == PTRACE_DETACH) {
		ev->event_data.ptrace.tracer_pid = 0;
		ev->event_data.ptrace.tracer_tgid = 0;
	} else
		return;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_comm_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_COMM;
	ev->event_data.comm.process_pid = task->pid;
	ev->event_data.comm.process_tgid = task->tgid;
	get_task_comm(ev->event_data.comm.comm, task);

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
void proc_exit_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_EXIT;
	ev->event_data.exit.process_pid = task->pid;
	ev->event_data.exit.process_tgid = task->tgid;
	ev->event_data.exit.exit_code = task->exit_code;
	ev->event_data.exit.exit_signal = task->exit_signal;

	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	msg->seq = rcvd_seq;
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->cpu = -1;
	ev->what = PROC_EVENT_NONE;
	ev->event_data.ack.err = err;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
/**
 * cn_proc_mcast_ctl
 * @data: message sent from userspace via the connector
 */
static void cn_proc_mcast_ctl(struct cn_msg *msg,
			      struct netlink_skb_parms *nsp)
{
	enum proc_cn_mcast_op *mc_op = NULL;
	int err = 0;

	if (msg->len != sizeof(*mc_op))
		return;

	/*
	 * Events are reported with respect to the initial pid
	 * and user namespaces so ignore requestors from
	 * other namespaces.
	 */
	if ((current_user_ns() != &init_user_ns) ||
	    (task_active_pid_ns(current) != &init_pid_ns))
		return;

	mc_op = (enum proc_cn_mcast_op *)msg->data;
	switch (*mc_op) {
	case PROC_CN_MCAST_LISTEN:
		atomic_inc(&proc_event_num_listeners);
		break;
	case PROC_CN_MCAST_IGNORE:
		atomic_dec(&proc_event_num_listeners);
		break;
	default:
		err = EINVAL;
		break;
	}

	cn_proc_ack(err, msg->seq, msg->ack);
}
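
/*
 * For illustration only (not part of this driver): a userspace requestor
 * typically subscribes by sending the PROC_CN_MCAST_LISTEN op, wrapped in a
 * cn_msg with id {CN_IDX_PROC, CN_VAL_PROC}, over a NETLINK_CONNECTOR socket
 * bound to the CN_IDX_PROC multicast group. A minimal sketch, with error
 * handling omitted and names local to the example:
 *
 *	int sk = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
 *	struct sockaddr_nl sa = { .nl_family = AF_NETLINK,
 *				  .nl_groups = CN_IDX_PROC,
 *				  .nl_pid    = getpid() };
 *	bind(sk, (struct sockaddr *)&sa, sizeof(sa));
 *	... then send an nlmsghdr followed by a cn_msg whose data is the
 *	    enum proc_cn_mcast_op and whose len is sizeof(op) ...
 *
 * The reply to that control message is the PROC_EVENT_NONE ack built by
 * cn_proc_ack() above.
 */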
/*
 * cn_proc_init - initialization entry point
 *
 * Adds the connector callback to the connector driver.
 */
static int __init cn_proc_init(void)
{
	int err = cn_add_callback(&cn_proc_event_id,
				  "cn_proc",
				  &cn_proc_mcast_ctl);
	if (err) {
		pr_warn("cn_proc failed to register\n");
		return err;
	}
	return 0;
}

module_init(cn_proc_init);
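
/*
 * For illustration only (not part of this driver): on the receive side a
 * listener gets each event as an nlmsghdr, then a cn_msg whose data is the
 * struct proc_event filled in above. A minimal sketch of the unpacking,
 * assuming the subscribed socket sk from the sketch after cn_proc_mcast_ctl():
 *
 *	char buf[4096];
 *	ssize_t n = recv(sk, buf, sizeof(buf), 0);
 *	struct nlmsghdr *nlh = (struct nlmsghdr *)buf;
 *	struct cn_msg *cn = NLMSG_DATA(nlh);
 *	struct proc_event *pev = (struct proc_event *)cn->data;
 *	switch (pev->what) {
 *	case PROC_EVENT_FORK:
 *		... pev->event_data.fork.child_pid ...
 *	}
 */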