/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>
/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)
static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;
static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};
static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};
static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};
struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);
enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};
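
/*
 * Allocate a genetlink skb of @size and start a message for command
 * @cmd in it.  When @info is NULL (the task-exit path, where there is
 * no request to reply to), a sequence number is drawn from the per-cpu
 * taskstats_seqnum counter instead.  On success *skbp holds the skb.
 */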
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}
/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_reply(skb, info);
}
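
/*
 * Every listener but the last receives a clone of the message; the
 * original skb is handed to the final listener.  Listeners whose
 * sockets have gone away (genlmsg_unicast() returns -ECONNREFUSED)
 * are marked invalid and reaped under the write side of the
 * semaphore afterwards.
 */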
/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}
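
/*
 * Collect accounting data for one task into @stats: basic fields
 * (bacct), extended fields (xacct) and delay accounting are each
 * filled in by their own subsystem.
 */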
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}
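
/*
 * Look up a single task by its pid as seen in the caller's pid
 * namespace and fill in its accounting data.  A reference is held on
 * the task so it cannot be released while the stats are being read.
 */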
static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}
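
/*
 * Per-tgid stats start from whatever has already been accumulated in
 * signal->stats by threads that exited earlier; the counters of all
 * still-live threads in the group are then added on top.
 */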
static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}
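
/*
 * Register or deregister @pid as an exit-data listener on every CPU in
 * @mask.  Each CPU has its own listener list; a registration that
 * matches an existing valid entry (same pid) is silently skipped.
 * Only a task in the initial user and pid namespaces may (de)register.
 */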
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;
	int ret = 0;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!s) {
				ret = -ENOMEM;
				goto cleanup;
			}
			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return ret;
}
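
/*
 * Parse the NLA_STRING cpumask attribute into @mask.  The string uses
 * the usual cpulist format accepted by cpulist_parse(), e.g. "0-3,8".
 * Returns 1 when the attribute is absent so callers can tell "not
 * supplied" apart from success (0) and errors (< 0).
 */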
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}
#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif
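
/*
 * Reserve space in @skb for one taskstats reply and return a pointer
 * to the stats payload for the caller to fill in.  The reply is a
 * nested attribute:
 *
 *	TASKSTATS_TYPE_AGGR_PID (or _TGID)
 *		TASKSTATS_TYPE_PID (or _TGID): u32 pid
 *		TASKSTATS_TYPE_STATS: struct taskstats
 */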
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret) {
		nla_nest_cancel(skb, na);
		goto err;
	}
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}
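
/*
 * CGROUPSTATS_CMD_GET handler: the caller passes an open fd for a
 * cgroup directory and gets back a cgroupstats reply built from that
 * cgroup's dentry.
 */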
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}
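
/*
 * The two cpumask command handlers below differ only in which
 * attribute they parse and whether the listener is added or removed:
 * both turn the attribute into a cpumask and hand it, together with
 * the sender's netlink port id, to add_del_listener().
 */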
static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}
static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}
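
/*
 * Size of one pid/tgid reply: the u32 id, the stats struct and the
 * enclosing aggregate attribute, plus the optional alignment padding
 * attribute described above mk_reply().
 */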
static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}
static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}
static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}
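
/*
 * TASKSTATS_CMD_GET handler: dispatch on whichever attribute was
 * supplied.  The attributes are checked in a fixed order, so a request
 * carrying more than one of them is answered for the first match only.
 */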
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}
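
/*
 * Lazily allocate signal->stats for tsk's thread group.  The unlocked
 * check may race with another thread doing the same allocation, so the
 * assignment is re-checked under siglock and the loser frees its copy.
 */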
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}
/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = __this_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}
static const struct genl_ops taskstats_ops[] = {
	{
		.cmd		= TASKSTATS_CMD_GET,
		.doit		= taskstats_user_cmd,
		.policy		= taskstats_cmd_get_policy,
		.flags		= GENL_ADMIN_PERM,
	},
	{
		.cmd		= CGROUPSTATS_CMD_GET,
		.doit		= cgroupstats_user_cmd,
		.policy		= cgroupstats_cmd_get_policy,
	},
};
/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}
static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family_with_ops(&family, taskstats_ops);
	if (rc)
		return rc;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
}
/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);