/*
 *  linux/fs/proc/array.c
 *
 *  Copyright (C) 1992  by Linus Torvalds
 *  based on ideas by Darren Senn
 *
 *  Fixes:
 *  Michael. K. Johnson: stat,statm extensions.
 *                      <johnsonm@stolaf.edu>
 *
 *  Pauline Middelink :  Made cmdline,envline only break at '\0's, to
 *                       make sure SET_PROCTITLE works. Also removed
 *                       bad '!' which forced address recalculation for
 *                       EVERY character on the current page.
 *                       <middelin@polyware.iaf.nl>
 *
 *  Danny ter Haar    :  added cpuinfo
 *                       <dth@cistron.nl>
 *
 *  Alessandro Rubini :  profile extension.
 *                       <rubini@ipvvis.unipv.it>
 *
 *  Jeff Tranter      :  added BogoMips field to cpuinfo
 *                       <Jeff_Tranter@Mitel.COM>
 *
 *  Bruno Haible      :  remove 4K limit for the maps file
 *                       <haible@ma2s2.mathematik.uni-karlsruhe.de>
 *
 *  Yves Arrouye      :  remove removal of trailing spaces in get_array.
 *                       <Yves.Arrouye@marin.fdn.fr>
 *
 *  Jerome Forissier  :  added per-CPU time information to /proc/stat
 *                       and /proc/<pid>/cpu extension
 *                       <forissier@isia.cma.fr>
 *                       - Incorporation and non-SMP safe operation
 *                       of forissier patch in 2.1.78 by
 *                       Hans Marcus <crowbar@concepts.nl>
 *
 *  aeb@cwi.nl        :  /proc/partitions
 *
 *  Alan Cox          :  security fixes.
 *                       <alan@lxorguk.ukuu.org.uk>
 *
 *  Al Viro           :  safe handling of mm_struct
 *
 *  Gerhard Wichert   :  added BIGMEM support
 *  Siemens AG           <Gerhard.Wichert@pdb.siemens.de>
 *
 *  Al Viro & Jeff Garzik :  moved most of the thing into base.c and
 *                        :  proc_misc.c. The rest may eventually go into
 *                        :  base.c too.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/times.h>
#include <linux/cpuset.h>
#include <linux/rcupdate.h>
#include <linux/delayacct.h>
#include <linux/seq_file.h>
#include <linux/pid_namespace.h>
#include <linux/prctl.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/string_helpers.h>
#include <linux/user_namespace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include "internal.h"
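/*
 * Emits the "Name:" line of /proc/<pid>/status.  The task's comm is run
 * through string_escape_str() so that embedded newlines and backslashes
 * cannot break the line-oriented format.
 */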
static inline void task_name(struct seq_file *m, struct task_struct *p)
{
        char *buf;
        size_t size;
        char tcomm[sizeof(p->comm)];
        int ret;

        get_task_comm(tcomm, p);

        seq_puts(m, "Name:\t");

        size = seq_get_buf(m, &buf);
        ret = string_escape_str(tcomm, buf, size, ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\");
        seq_commit(m, ret < size ? ret : -1);

        seq_putc(m, '\n');
}
/*
 * The task state array is a strange "bitmap" of
 * reasons to sleep. Thus "running" is zero, and
 * you can test for combinations of others with
 * simple bit tests.
 */
static const char * const task_state_array[] = {
        "R (running)",          /*   0 */
        "S (sleeping)",         /*   1 */
        "D (disk sleep)",       /*   2 */
        "T (stopped)",          /*   4 */
        "t (tracing stop)",     /*   8 */
        "X (dead)",             /*  16 */
        "Z (zombie)",           /*  32 */
};
static inline const char *get_task_state(struct task_struct *tsk)
{
        unsigned int state = (tsk->state | tsk->exit_state) & TASK_REPORT;

        /*
         * Parked tasks do not run; they sit in __kthread_parkme().
         * Without this check, we would report them as running, which is
         * clearly wrong, so we report them as sleeping instead.
         */
        if (tsk->state == TASK_PARKED)
                state = TASK_INTERRUPTIBLE;

        BUILD_BUG_ON(1 + ilog2(TASK_REPORT) != ARRAY_SIZE(task_state_array)-1);

        return task_state_array[fls(state)];
}
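/*
 * Emits the State, Tgid, Ngid, Pid, PPid, TracerPid, Uid, Gid, FDSize and
 * Groups lines (plus the NS* lines under CONFIG_PID_NS) of
 * /proc/<pid>/status, with all ids translated for the reader's namespaces.
 */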
static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
                                struct pid *pid, struct task_struct *p)
{
        struct user_namespace *user_ns = seq_user_ns(m);
        struct group_info *group_info;
        int g;
        struct task_struct *tracer;
        const struct cred *cred;
        pid_t ppid, tpid = 0, tgid, ngid;
        unsigned int max_fds = 0;

        rcu_read_lock();
        ppid = pid_alive(p) ?
                task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;

        tracer = ptrace_parent(p);
        if (tracer)
                tpid = task_pid_nr_ns(tracer, ns);

        tgid = task_tgid_nr_ns(p, ns);
        ngid = task_numa_group_id(p);
        cred = get_task_cred(p);

        task_lock(p);
        if (p->files)
                max_fds = files_fdtable(p->files)->max_fds;
        task_unlock(p);
        rcu_read_unlock();

        seq_printf(m,
                "State:\t%s\n"
                "Tgid:\t%d\n"
                "Ngid:\t%d\n"
                "Pid:\t%d\n"
                "PPid:\t%d\n"
                "TracerPid:\t%d\n"
                "Uid:\t%d\t%d\t%d\t%d\n"
                "Gid:\t%d\t%d\t%d\t%d\n"
                "FDSize:\t%d\nGroups:\t",
                get_task_state(p),
                tgid, ngid, pid_nr_ns(pid, ns), ppid, tpid,
                from_kuid_munged(user_ns, cred->uid),
                from_kuid_munged(user_ns, cred->euid),
                from_kuid_munged(user_ns, cred->suid),
                from_kuid_munged(user_ns, cred->fsuid),
                from_kgid_munged(user_ns, cred->gid),
                from_kgid_munged(user_ns, cred->egid),
                from_kgid_munged(user_ns, cred->sgid),
                from_kgid_munged(user_ns, cred->fsgid),
                max_fds);

        group_info = cred->group_info;
        for (g = 0; g < group_info->ngroups; g++)
                seq_printf(m, "%d ",
                           from_kgid_munged(user_ns, GROUP_AT(group_info, g)));
        put_cred(cred);

#ifdef CONFIG_PID_NS
        seq_puts(m, "\nNStgid:");
        for (g = ns->level; g <= pid->level; g++)
                seq_printf(m, "\t%d",
                        task_tgid_nr_ns(p, pid->numbers[g].ns));
        seq_puts(m, "\nNSpid:");
        for (g = ns->level; g <= pid->level; g++)
                seq_printf(m, "\t%d",
                        task_pid_nr_ns(p, pid->numbers[g].ns));
        seq_puts(m, "\nNSpgid:");
        for (g = ns->level; g <= pid->level; g++)
                seq_printf(m, "\t%d",
                        task_pgrp_nr_ns(p, pid->numbers[g].ns));
        seq_puts(m, "\nNSsid:");
        for (g = ns->level; g <= pid->level; g++)
                seq_printf(m, "\t%d",
                        task_session_nr_ns(p, pid->numbers[g].ns));
#endif
        seq_putc(m, '\n');
}
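/*
 * Renders a signal set as _NSIG/4 hex digits, highest-numbered signals
 * first; this is the format of the SigPnd/ShdPnd/SigBlk/SigIgn/SigCgt
 * lines in /proc/<pid>/status.  As a sketch (assuming _NSIG == 64), a set
 * containing only SIGKILL (signal 9) renders as "0000000000000100".
 */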
void render_sigset_t(struct seq_file *m, const char *header,
                                sigset_t *set)
{
        int i;

        seq_puts(m, header);

        i = _NSIG;
        do {
                int x = 0;

                i -= 4;
                if (sigismember(set, i+1)) x |= 1;
                if (sigismember(set, i+2)) x |= 2;
                if (sigismember(set, i+3)) x |= 4;
                if (sigismember(set, i+4)) x |= 8;
                seq_printf(m, "%x", x);
        } while (i >= 4);

        seq_putc(m, '\n');
}
static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
                                    sigset_t *catch)
{
        struct k_sigaction *k;
        int i;

        k = p->sighand->action;
        for (i = 1; i <= _NSIG; ++i, ++k) {
                if (k->sa.sa_handler == SIG_IGN)
                        sigaddset(ign, i);
                else if (k->sa.sa_handler != SIG_DFL)
                        sigaddset(catch, i);
        }
}
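/*
 * Emits the Threads and SigQ lines plus the five signal-set lines of
 * /proc/<pid>/status.  The pending, blocked, ignored and caught sets are
 * snapshotted under the sighand lock so they are self-consistent.
 */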
static inline void task_sig(struct seq_file *m, struct task_struct *p)
{
        unsigned long flags;
        sigset_t pending, shpending, blocked, ignored, caught;
        int num_threads = 0;
        unsigned long qsize = 0;
        unsigned long qlim = 0;

        sigemptyset(&pending);
        sigemptyset(&shpending);
        sigemptyset(&blocked);
        sigemptyset(&ignored);
        sigemptyset(&caught);

        if (lock_task_sighand(p, &flags)) {
                pending = p->pending.signal;
                shpending = p->signal->shared_pending.signal;
                blocked = p->blocked;
                collect_sigign_sigcatch(p, &ignored, &caught);
                num_threads = get_nr_threads(p);
                rcu_read_lock();  /* FIXME: is this correct? */
                qsize = atomic_read(&__task_cred(p)->user->sigpending);
                rcu_read_unlock();
                qlim = task_rlimit(p, RLIMIT_SIGPENDING);
                unlock_task_sighand(p, &flags);
        }

        seq_printf(m, "Threads:\t%d\n", num_threads);
        seq_printf(m, "SigQ:\t%lu/%lu\n", qsize, qlim);

        /* render them all */
        render_sigset_t(m, "SigPnd:\t", &pending);
        render_sigset_t(m, "ShdPnd:\t", &shpending);
        render_sigset_t(m, "SigBlk:\t", &blocked);
        render_sigset_t(m, "SigIgn:\t", &ignored);
        render_sigset_t(m, "SigCgt:\t", &caught);
}
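/*
 * Renders a capability set as CAP_LAST_U32 + 1 32-bit words in hex, most
 * significant word first; this is the format of the CapInh, CapPrm,
 * CapEff, CapBnd and CapAmb lines in /proc/<pid>/status.
 */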
static void render_cap_t(struct seq_file *m, const char *header,
                        kernel_cap_t *a)
{
        unsigned __capi;

        seq_puts(m, header);
        CAP_FOR_EACH_U32(__capi) {
                seq_printf(m, "%08x",
                           a->cap[CAP_LAST_U32 - __capi]);
        }
        seq_putc(m, '\n');
}
static inline void task_cap(struct seq_file *m, struct task_struct *p)
{
        const struct cred *cred;
        kernel_cap_t cap_inheritable, cap_permitted, cap_effective,
                        cap_bset, cap_ambient;

        rcu_read_lock();
        cred = __task_cred(p);
        cap_inheritable = cred->cap_inheritable;
        cap_permitted   = cred->cap_permitted;
        cap_effective   = cred->cap_effective;
        cap_bset        = cred->cap_bset;
        cap_ambient     = cred->cap_ambient;
        rcu_read_unlock();

        render_cap_t(m, "CapInh:\t", &cap_inheritable);
        render_cap_t(m, "CapPrm:\t", &cap_permitted);
        render_cap_t(m, "CapEff:\t", &cap_effective);
        render_cap_t(m, "CapBnd:\t", &cap_bset);
        render_cap_t(m, "CapAmb:\t", &cap_ambient);
}
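/*
 * Seccomp is the raw seccomp mode of the task (0 = disabled, 1 = strict,
 * 2 = filter).  Speculation_Store_Bypass translates the task's
 * PR_SPEC_STORE_BYPASS state into a human-readable string.
 */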
static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
{
#ifdef CONFIG_SECCOMP
        seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode);
#endif
        seq_printf(m, "Speculation_Store_Bypass:\t");
        switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
        case -EINVAL:
                seq_printf(m, "unknown");
                break;
        case PR_SPEC_NOT_AFFECTED:
                seq_printf(m, "not vulnerable");
                break;
        case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
                seq_printf(m, "thread force mitigated");
                break;
        case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
                seq_printf(m, "thread mitigated");
                break;
        case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
                seq_printf(m, "thread vulnerable");
                break;
        case PR_SPEC_DISABLE:
                seq_printf(m, "globally mitigated");
                break;
        default:
                seq_printf(m, "vulnerable");
                break;
        }
        seq_putc(m, '\n');
}
static inline void task_context_switch_counts(struct seq_file *m,
                                                struct task_struct *p)
{
        seq_printf(m, "voluntary_ctxt_switches:\t%lu\n"
                      "nonvoluntary_ctxt_switches:\t%lu\n",
                   p->nvcsw,
                   p->nivcsw);
}
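/*
 * %*pb renders the cpumask as a hex bitmap and %*pbl as a range list;
 * e.g. on an 8-CPU machine with no affinity restrictions these lines
 * would typically read "Cpus_allowed: ff" and "Cpus_allowed_list: 0-7".
 */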
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
{
        seq_printf(m, "Cpus_allowed:\t%*pb\n",
                   cpumask_pr_args(&task->cpus_allowed));
        seq_printf(m, "Cpus_allowed_list:\t%*pbl\n",
                   cpumask_pr_args(&task->cpus_allowed));
}
int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);

        task_name(m, task);
        task_state(m, ns, pid, task);

        if (mm) {
                task_mem(m, mm);
                mmput(mm);
        }
        task_sig(m, task);
        task_cap(m, task);
        task_seccomp(m, task);
        task_cpus_allowed(m, task);
        cpuset_task_status_allowed(m, task);
        task_context_switch_counts(m, task);
        return 0;
}
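/*
 * Backs both /proc/<pid>/stat (whole != 0: fault counts, cpu times and
 * guest time are summed over the whole thread group) and
 * /proc/<pid>/task/<tid>/stat (whole == 0: per-thread values only).
 */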
static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task, int whole)
{
        unsigned long vsize, eip, esp, wchan = 0;
        int priority, nice;
        int tty_pgrp = -1, tty_nr = 0;
        sigset_t sigign, sigcatch;
        char state;
        pid_t ppid = 0, pgid = -1, sid = -1;
        int num_threads = 0;
        int permitted;
        struct mm_struct *mm;
        unsigned long long start_time;
        unsigned long cmin_flt = 0, cmaj_flt = 0;
        unsigned long min_flt = 0, maj_flt = 0;
        cputime_t cutime, cstime, utime, stime;
        cputime_t cgtime, gtime;
        unsigned long rsslim = 0;
        char tcomm[sizeof(task->comm)];
        unsigned long flags;

        state = *get_task_state(task);
        vsize = eip = esp = 0;
        permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
        mm = get_task_mm(task);
        if (mm) {
                vsize = task_vsize(mm);
                /*
                 * esp and eip are intentionally zeroed out.  There is no
                 * non-racy way to read them without freezing the task.
                 * Programs that need reliable values can use ptrace(2).
                 *
                 * The only exception is if the task is core dumping because
                 * a program is not able to use ptrace(2) in that case.  It is
                 * safe because the task has stopped executing permanently.
                 */
                if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) {
                        if (try_get_task_stack(task)) {
                                eip = KSTK_EIP(task);
                                esp = KSTK_ESP(task);
                                put_task_stack(task);
                        }
                }
        }

        get_task_comm(tcomm, task);

        sigemptyset(&sigign);
        sigemptyset(&sigcatch);
        cutime = cstime = utime = stime = 0;
        cgtime = gtime = 0;

        if (lock_task_sighand(task, &flags)) {
                struct signal_struct *sig = task->signal;

                if (sig->tty) {
                        struct pid *pgrp = tty_get_pgrp(sig->tty);
                        tty_pgrp = pid_nr_ns(pgrp, ns);
                        put_pid(pgrp);
                        tty_nr = new_encode_dev(tty_devnum(sig->tty));
                }

                num_threads = get_nr_threads(task);
                collect_sigign_sigcatch(task, &sigign, &sigcatch);

                cmin_flt = sig->cmin_flt;
                cmaj_flt = sig->cmaj_flt;
                cutime = sig->cutime;
                cstime = sig->cstime;
                cgtime = sig->cgtime;
                rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);

                /* add up live thread stats at the group level */
                if (whole) {
                        struct task_struct *t = task;
                        do {
                                min_flt += t->min_flt;
                                maj_flt += t->maj_flt;
                                gtime += task_gtime(t);
                        } while_each_thread(task, t);

                        min_flt += sig->min_flt;
                        maj_flt += sig->maj_flt;
                        thread_group_cputime_adjusted(task, &utime, &stime);
                        gtime += sig->gtime;
                }

                sid = task_session_nr_ns(task, ns);
                ppid = task_tgid_nr_ns(task->real_parent, ns);
                pgid = task_pgrp_nr_ns(task, ns);

                unlock_task_sighand(task, &flags);
        }

        if (permitted && (!whole || num_threads < 2))
                wchan = get_wchan(task);
        if (!whole) {
                min_flt = task->min_flt;
                maj_flt = task->maj_flt;
                task_cputime_adjusted(task, &utime, &stime);
                gtime = task_gtime(task);
        }

        /* scale priority and nice values from timeslices to -20..20 */
        /* to make it look like a "normal" Unix priority/nice value */
        priority = task_prio(task);
        nice = task_nice(task);

        /* convert nsec -> ticks */
        start_time = nsec_to_clock_t(task->real_start_time);

        seq_printf(m, "%d (%s) %c", pid_nr_ns(pid, ns), tcomm, state);
        seq_put_decimal_ll(m, ' ', ppid);
        seq_put_decimal_ll(m, ' ', pgid);
        seq_put_decimal_ll(m, ' ', sid);
        seq_put_decimal_ll(m, ' ', tty_nr);
        seq_put_decimal_ll(m, ' ', tty_pgrp);
        seq_put_decimal_ull(m, ' ', task->flags);
        seq_put_decimal_ull(m, ' ', min_flt);
        seq_put_decimal_ull(m, ' ', cmin_flt);
        seq_put_decimal_ull(m, ' ', maj_flt);
        seq_put_decimal_ull(m, ' ', cmaj_flt);
        seq_put_decimal_ull(m, ' ', cputime_to_clock_t(utime));
        seq_put_decimal_ull(m, ' ', cputime_to_clock_t(stime));
        seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cutime));
        seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cstime));
        seq_put_decimal_ll(m, ' ', priority);
        seq_put_decimal_ll(m, ' ', nice);
        seq_put_decimal_ll(m, ' ', num_threads);
        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ull(m, ' ', start_time);
        seq_put_decimal_ull(m, ' ', vsize);
        seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
        seq_put_decimal_ull(m, ' ', rsslim);
        seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
        seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
        seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
        seq_put_decimal_ull(m, ' ', esp);
        seq_put_decimal_ull(m, ' ', eip);
        /* The signal information here is obsolete.
         * It must be decimal for Linux 2.0 compatibility.
         * Use /proc/#/status for real-time signals.
         */
        seq_put_decimal_ull(m, ' ', task->pending.signal.sig[0] & 0x7fffffffUL);
        seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL);
        seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL);
        seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL);

        /*
         * We used to output the absolute kernel address, but that's an
         * information leak - so instead we show a 0/1 flag here, to signal
         * to user-space whether there's a wchan field in /proc/PID/wchan.
         *
         * This works with older implementations of procps as well.
         */
        if (wchan)
                seq_puts(m, " 1");
        else
                seq_puts(m, " 0");

        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ll(m, ' ', task->exit_signal);
        seq_put_decimal_ll(m, ' ', task_cpu(task));
        seq_put_decimal_ull(m, ' ', task->rt_priority);
        seq_put_decimal_ull(m, ' ', task->policy);
        seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
        seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
        seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));

        if (mm && permitted) {
                seq_put_decimal_ull(m, ' ', mm->start_data);
                seq_put_decimal_ull(m, ' ', mm->end_data);
                seq_put_decimal_ull(m, ' ', mm->start_brk);
                seq_put_decimal_ull(m, ' ', mm->arg_start);
                seq_put_decimal_ull(m, ' ', mm->arg_end);
                seq_put_decimal_ull(m, ' ', mm->env_start);
                seq_put_decimal_ull(m, ' ', mm->env_end);
        } else
                seq_printf(m, " 0 0 0 0 0 0 0");

        if (permitted)
                seq_put_decimal_ll(m, ' ', task->exit_code);
        else
                seq_put_decimal_ll(m, ' ', 0);

        seq_putc(m, '\n');
        if (mm)
                mmput(mm);
        return 0;
}
int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task)
{
        return do_task_stat(m, ns, pid, task, 0);
}

int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task)
{
        return do_task_stat(m, ns, pid, task, 1);
}
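/*
 * /proc/<pid>/statm: seven space-separated values, all in pages:
 * size resident shared text lib data dt, with lib and dt always reported
 * as 0 here.
 */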
int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
                        struct pid *pid, struct task_struct *task)
{
        unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
        struct mm_struct *mm = get_task_mm(task);

        if (mm) {
                size = task_statm(mm, &shared, &text, &data, &resident);
                mmput(mm);
        }
        /*
         * For quick reads, open-code the output; the expected format is
         * seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
         *               size, resident, shared, text, data);
         */
        seq_put_decimal_ull(m, 0, size);
        seq_put_decimal_ull(m, ' ', resident);
        seq_put_decimal_ull(m, ' ', shared);
        seq_put_decimal_ull(m, ' ', text);
        seq_put_decimal_ull(m, ' ', 0);
        seq_put_decimal_ull(m, ' ', data);
        seq_put_decimal_ull(m, ' ', 0);
        seq_putc(m, '\n');

        return 0;
}
#ifdef CONFIG_PROC_CHILDREN
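/*
 * Iterator backing /proc/<pid>/task/<tid>/children: returns the pid of
 * the pos'th live child of the task identified by @inode, or NULL.  When
 * the previously returned pid is still a child, the search continues from
 * its sibling link instead of rescanning the whole list.
 */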
static struct pid *
get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
{
        struct task_struct *start, *task;
        struct pid *pid = NULL;

        read_lock(&tasklist_lock);

        start = pid_task(proc_pid(inode), PIDTYPE_PID);
        if (!start)
                goto out;

        /*
         * Let's try to continue searching first; this gives
         * us a significant speedup on children-rich processes.
         */
        if (pid_prev) {
                task = pid_task(pid_prev, PIDTYPE_PID);
                if (task && task->real_parent == start &&
                    !(list_empty(&task->sibling))) {
                        if (list_is_last(&task->sibling, &start->children))
                                goto out;
                        task = list_first_entry(&task->sibling,
                                                struct task_struct, sibling);
                        pid = get_pid(task_pid(task));
                        goto out;
                }
        }

        /*
         * Slow search case.
         *
         * We might miss some children here if children exit while
         * we are not holding the lock, but it was never promised
         * to be that accurate.
         *
         * "Just suppose that the parent sleeps, but N children
         *  exit after we printed their tids. Now the slow paths
         *  skips N extra children, we miss N tasks." (c)
         *
         * So one needs to stop or freeze the leader and all its
         * children to get a precise result.
         */
        list_for_each_entry(task, &start->children, sibling) {
                if (pos-- == 0) {
                        pid = get_pid(task_pid(task));
                        break;
                }
        }

out:
        read_unlock(&tasklist_lock);
        return pid;
}
static int children_seq_show(struct seq_file *seq, void *v)
{
        struct inode *inode = seq->private;
        pid_t pid;

        pid = pid_nr_ns(v, inode->i_sb->s_fs_info);
        seq_printf(seq, "%d ", pid);

        return 0;
}

static void *children_seq_start(struct seq_file *seq, loff_t *pos)
{
        return get_children_pid(seq->private, NULL, *pos);
}

static void *children_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct pid *pid;

        pid = get_children_pid(seq->private, v, *pos + 1);
        put_pid(v);

        ++*pos;
        return pid;
}

static void children_seq_stop(struct seq_file *seq, void *v)
{
        put_pid(v);
}
static const struct seq_operations children_seq_ops = {
        .start = children_seq_start,
        .next  = children_seq_next,
        .stop  = children_seq_stop,
        .show  = children_seq_show,
};
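/*
 * The proc inode is stashed in m->private so that the start/next
 * callbacks can resolve the task via proc_pid(inode) on every pass.
 */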
static int children_seq_open(struct inode *inode, struct file *file)
{
        struct seq_file *m;
        int ret;

        ret = seq_open(file, &children_seq_ops);
        if (ret)
                return ret;

        m = file->private_data;
        m->private = inode;

        return ret;
}

int children_seq_release(struct inode *inode, struct file *file)
{
        seq_release(inode, file);
        return 0;
}
const struct file_operations proc_tid_children_operations = {
        .open    = children_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = children_seq_release,
};
#endif /* CONFIG_PROC_CHILDREN */