#include <dirent.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "strlist.h"
#include "thread_map.h"
#include "util.h"
#include "debug.h"
#include "event.h"
/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
	if (dir->d_name[0] == '.')
		return 0;
	else
		return 1;
}
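/* Zero the entries in the range [start, nr) so newly added slots start out empty. */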
static void thread_map__reset(struct thread_map *map, int start, int nr)
{
	size_t size = (nr - start) * sizeof(map->map[0]);

	memset(&map->map[start], 0, size);
}
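/*
 * Allocate a thread_map, or grow an existing one, so that it can hold
 * 'nr' entries.  Existing entries are preserved, new ones are zeroed.
 */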
static struct thread_map *thread_map__realloc(struct thread_map *map, int nr)
{
	size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
	int start = map ? map->nr : 0;

	map = realloc(map, size);
	/*
	 * We only realloc to add more items, let's reset new items.
	 */
	if (map)
		thread_map__reset(map, start, nr);

	return map;
}
#define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr)
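/* Build a map of all tasks of one process by scanning /proc/<pid>/task. */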
struct thread_map *thread_map__new_by_pid(pid_t pid)
{
	struct thread_map *threads;
	char name[256];
	int items;
	struct dirent **namelist = NULL;
	int i;

	sprintf(name, "/proc/%d/task", pid);
	items = scandir(name, &namelist, filter, NULL);
	if (items <= 0)
		return NULL;

	threads = thread_map__alloc(items);
	if (threads != NULL) {
		for (i = 0; i < items; i++)
			thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
		threads->nr = items;
		atomic_set(&threads->refcnt, 1);
	}

	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

	return threads;
}
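/* Build a single-entry map for one specific thread id. */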
struct thread_map *thread_map__new_by_tid(pid_t tid)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, tid);
		threads->nr = 1;
		atomic_set(&threads->refcnt, 1);
	}

	return threads;
}
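/*
 * Walk /proc and collect every task of every process owned by 'uid',
 * growing the map as more tasks are found.
 */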
struct thread_map *thread_map__new_by_uid(uid_t uid)
{
	DIR *proc;
	int max_threads = 32, items, i;
	char path[NAME_MAX + 1 + 6];
	struct dirent dirent, *next, **namelist = NULL;
	struct thread_map *threads = thread_map__alloc(max_threads);

	if (threads == NULL)
		goto out;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_threads;

	threads->nr = 0;
	atomic_set(&threads->refcnt, 1);

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		bool grow = false;
		struct stat st;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		snprintf(path, sizeof(path), "/proc/%s", dirent.d_name);

		if (stat(path, &st) != 0)
			continue;

		if (st.st_uid != uid)
			continue;

		snprintf(path, sizeof(path), "/proc/%d/task", pid);
		items = scandir(path, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_closedir;

		/* double max_threads until the new tasks fit */
		while (threads->nr + items >= max_threads) {
			max_threads *= 2;
			grow = true;
		}

		if (grow) {
			struct thread_map *tmp;

			tmp = thread_map__realloc(threads, max_threads);
			if (tmp == NULL)
				goto out_free_namelist;

			threads = tmp;
		}

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, threads->nr + i,
					    atoi(namelist[i]->d_name));
		}

		/* release the scandir() results for this process */
		for (i = 0; i < items; i++)
			zfree(&namelist[i]);
		free(namelist);

		threads->nr += items;
	}

out_closedir:
	closedir(proc);
out:
	return threads;

out_free_threads:
	free(threads);
	return NULL;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_closedir:
	zfree(&threads);
	goto out_closedir;
}
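/* Pick the constructor matching whichever of pid/tid/uid was specified. */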
struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
	if (pid != -1)
		return thread_map__new_by_pid(pid);

	if (tid == -1 && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid(tid);
}
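/*
 * Parse a comma-separated list of pids and collect the tasks of each
 * listed process into a single map.
 */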
static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
{
	struct thread_map *threads = NULL, *nt;
	char name[256];
	int items, total_tasks = 0;
	struct dirent **namelist = NULL;
	int i, j = 0;
	pid_t pid, prev_pid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist = strlist__new(pid_str, &slist_config);

	if (!slist)
		return NULL;

	strlist__for_each(pos, slist) {
		pid = strtol(pos->s, &end_ptr, 10);

		if (pid == INT_MIN || pid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		/* skip consecutive duplicates in the list */
		if (pid == prev_pid)
			continue;
		prev_pid = pid;

		sprintf(name, "/proc/%d/task", pid);
		items = scandir(name, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_threads;

		total_tasks += items;
		nt = thread_map__realloc(threads, total_tasks);
		if (nt == NULL)
			goto out_free_namelist;

		threads = nt;

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
			zfree(&namelist[i]);
		}
		threads->nr = total_tasks;
		free(namelist);
	}

out:
	strlist__delete(slist);
	if (threads)
		atomic_set(&threads->refcnt, 1);
	return threads;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_threads:
	zfree(&threads);
	goto out;
}
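/* A one-entry placeholder map (pid -1), used when no threads are specified. */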
struct thread_map *thread_map__new_dummy(void)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, -1);
		threads->nr = 1;
		atomic_set(&threads->refcnt, 1);
	}
	return threads;
}
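/* Parse a comma-separated list of tids into a map, one entry per tid. */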
static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
{
	struct thread_map *threads = NULL, *nt;
	int ntasks = 0;
	pid_t tid, prev_tid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist;

	/* perf-stat expects threads to be generated even if tid not given */
	if (!tid_str)
		return thread_map__new_dummy();

	slist = strlist__new(tid_str, &slist_config);
	if (!slist)
		return NULL;

	strlist__for_each(pos, slist) {
		tid = strtol(pos->s, &end_ptr, 10);

		if (tid == INT_MIN || tid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		/* skip consecutive duplicates in the list */
		if (tid == prev_tid)
			continue;
		prev_tid = tid;

		ntasks++;
		nt = thread_map__realloc(threads, ntasks);

		if (nt == NULL)
			goto out_free_threads;

		threads = nt;
		thread_map__set_pid(threads, ntasks - 1, tid);
		threads->nr = ntasks;
	}
out:
	if (threads)
		atomic_set(&threads->refcnt, 1);
	return threads;

out_free_threads:
	zfree(&threads);
	strlist__delete(slist);
	goto out;
}
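/* String front-end to thread_map__new(): pid and tid are comma-separated lists. */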
struct thread_map *thread_map__new_str(const char *pid, const char *tid,
				       uid_t uid)
{
	if (pid)
		return thread_map__new_by_pid_str(pid);

	if (!tid && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid_str(tid);
}
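/* Free the per-thread comm strings and the map itself. */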
static void thread_map__delete(struct thread_map *threads)
{
	if (threads) {
		int i;

		WARN_ONCE(atomic_read(&threads->refcnt) != 0,
			  "thread map refcnt unbalanced\n");
		for (i = 0; i < threads->nr; i++)
			free(thread_map__comm(threads, i));
		free(threads);
	}
}
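/* Grab a reference on the map; drop it with thread_map__put(). */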
struct thread_map *thread_map__get(struct thread_map *map)
{
	if (map)
		atomic_inc(&map->refcnt);
	return map;
}
void thread_map__put(struct thread_map *map)
{
	if (map && atomic_dec_and_test(&map->refcnt))
		thread_map__delete(map);
}
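/* Print the thread count and the list of pids/tids to 'fp'; returns bytes written. */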
size_t thread_map__fprintf(struct thread_map *threads, FILE *fp)
{
	int i;
	size_t printed = fprintf(fp, "%d thread%s: ",
				 threads->nr, threads->nr > 1 ? "s" : "");
	for (i = 0; i < threads->nr; ++i)
		printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i));

	return printed + fprintf(fp, "\n");
}
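/* Read the comm name of 'pid' from /proc/<pid>/comm into a newly allocated string. */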
static int get_comm(char **comm, pid_t pid)
{
	char *path;
	size_t size;
	int err;

	if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
		return -ENOMEM;

	err = filename__read_str(path, comm, &size);
	if (!err) {
		/*
		 * We're reading 16 bytes, while filename__read_str
		 * allocates data per BUFSIZ bytes, so we can safely
		 * mark the end of the string.
		 */
		(*comm)[size] = 0;
		rtrim(*comm);
	}

	free(path);
	return err;
}
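/* Resolve and store the comm name for map entry 'i'; dummy entries get "dummy". */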
static void comm_init(struct thread_map *map, int i)
{
	pid_t pid = thread_map__pid(map, i);
	char *comm = NULL;

	/* dummy pid comm initialization */
	if (pid == -1) {
		map->map[i].comm = strdup("dummy");
		return;
	}

	/*
	 * The comm name is like extra bonus ;-),
	 * so just warn if we fail for any reason.
	 */
	if (get_comm(&comm, pid))
		pr_warning("Couldn't resolve comm name for pid %d\n", pid);

	map->map[i].comm = comm;
}
void thread_map__read_comms(struct thread_map *threads)
{
	int i;

	for (i = 0; i < threads->nr; ++i)
		comm_init(threads, i);
}
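/* Fill an allocated map from the pids and comm names carried by a thread_map event. */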
static void thread_map__copy_event(struct thread_map *threads,
				   struct thread_map_event *event)
{
	unsigned int i;

	threads->nr = (int) event->nr;

	for (i = 0; i < event->nr; i++) {
		thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
		threads->map[i].comm = strndup(event->entries[i].comm, 16);
	}

	atomic_set(&threads->refcnt, 1);
}
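/* Allocate a map sized for the event and copy its entries in. */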
struct thread_map *thread_map__new_event(struct thread_map_event *event)
{
	struct thread_map *threads;

	threads = thread_map__alloc(event->nr);
	if (threads)
		thread_map__copy_event(threads, event);

	return threads;
}