tools/perf/util/thread_map.c

#include <dirent.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "strlist.h"
#include <string.h>
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "thread_map.h"
#include "util.h"
#include "debug.h"
#include "event.h"

/* Skip "." and ".." directories */
static int filter(const struct dirent *dir)
{
	if (dir->d_name[0] == '.')
		return 0;
	else
		return 1;
}

static void thread_map__reset(struct thread_map *map, int start, int nr)
{
	size_t size = (nr - start) * sizeof(map->map[0]);

	memset(&map->map[start], 0, size);
}

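/*
 * Grow (or, via thread_map__alloc(), initially allocate) a map to hold
 * 'nr' entries; slots added beyond the previous size are zeroed.
 * Returns NULL if realloc() fails.
 */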
static struct thread_map *thread_map__realloc(struct thread_map *map, int nr)
{
	size_t size = sizeof(*map) + sizeof(map->map[0]) * nr;
	int start = map ? map->nr : 0;

	map = realloc(map, size);
	/*
	 * We only realloc to add more items, let's reset new items.
	 */
	if (map)
		thread_map__reset(map, start, nr);

	return map;
}

#define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr)

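/* Build a map of all threads of one process by scanning /proc/<pid>/task. */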
struct thread_map *thread_map__new_by_pid(pid_t pid)
{
	struct thread_map *threads;
	char name[256];
	int items;
	struct dirent **namelist = NULL;
	int i;

	sprintf(name, "/proc/%d/task", pid);
	items = scandir(name, &namelist, filter, NULL);
	if (items <= 0)
		return NULL;

	threads = thread_map__alloc(items);
	if (threads != NULL) {
		for (i = 0; i < items; i++)
			thread_map__set_pid(threads, i, atoi(namelist[i]->d_name));
		threads->nr = items;
		atomic_set(&threads->refcnt, 1);
	}

	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

	return threads;
}

struct thread_map *thread_map__new_by_tid(pid_t tid)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, tid);
		threads->nr = 1;
		atomic_set(&threads->refcnt, 1);
	}

	return threads;
}

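/*
 * Walk /proc and collect every thread of every process owned by 'uid',
 * doubling the map capacity whenever the next batch of tasks would not fit.
 */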
struct thread_map *thread_map__new_by_uid(uid_t uid)
{
	DIR *proc;
	int max_threads = 32, items, i;
	char path[256];
	struct dirent dirent, *next, **namelist = NULL;
	struct thread_map *threads = thread_map__alloc(max_threads);

	if (threads == NULL)
		goto out;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_threads;

	threads->nr = 0;
	atomic_set(&threads->refcnt, 1);

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		bool grow = false;
		struct stat st;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		snprintf(path, sizeof(path), "/proc/%s", dirent.d_name);

		if (stat(path, &st) != 0)
			continue;

		if (st.st_uid != uid)
			continue;

		snprintf(path, sizeof(path), "/proc/%d/task", pid);
		items = scandir(path, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_closedir;

		while (threads->nr + items >= max_threads) {
			max_threads *= 2;
			grow = true;
		}

		if (grow) {
			struct thread_map *tmp;

			tmp = thread_map__realloc(threads, max_threads);
			if (tmp == NULL)
				goto out_free_namelist;

			threads = tmp;
		}

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, threads->nr + i,
					    atoi(namelist[i]->d_name));
		}

		for (i = 0; i < items; i++)
			zfree(&namelist[i]);
		free(namelist);

		threads->nr += items;
	}

out_closedir:
	closedir(proc);
out:
	return threads;

out_free_threads:
	free(threads);
	return NULL;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_closedir:
	zfree(&threads);
	goto out_closedir;
}

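/*
 * Dispatch on whichever selector was given: pid first, then uid
 * (only considered when no tid was specified), and finally tid.
 */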
struct thread_map *thread_map__new(pid_t pid, pid_t tid, uid_t uid)
{
	if (pid != -1)
		return thread_map__new_by_pid(pid);

	if (tid == -1 && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid(tid);
}

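/*
 * Parse a comma-separated list of pids and gather all threads of each
 * listed process, growing the map as the total task count increases.
 */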
static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
{
	struct thread_map *threads = NULL, *nt;
	char name[256];
	int items, total_tasks = 0;
	struct dirent **namelist = NULL;
	int i, j = 0;
	pid_t pid, prev_pid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist = strlist__new(pid_str, &slist_config);

	if (!slist)
		return NULL;

	strlist__for_each(pos, slist) {
		pid = strtol(pos->s, &end_ptr, 10);

		if (pid == INT_MIN || pid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (pid == prev_pid)
			continue;

		sprintf(name, "/proc/%d/task", pid);
		items = scandir(name, &namelist, filter, NULL);
		if (items <= 0)
			goto out_free_threads;

		total_tasks += items;
		nt = thread_map__realloc(threads, total_tasks);
		if (nt == NULL)
			goto out_free_namelist;

		threads = nt;

		for (i = 0; i < items; i++) {
			thread_map__set_pid(threads, j++, atoi(namelist[i]->d_name));
			zfree(&namelist[i]);
		}
		threads->nr = total_tasks;
		free(namelist);
	}

out:
	strlist__delete(slist);
	if (threads)
		atomic_set(&threads->refcnt, 1);
	return threads;

out_free_namelist:
	for (i = 0; i < items; i++)
		zfree(&namelist[i]);
	free(namelist);

out_free_threads:
	zfree(&threads);
	goto out;
}

struct thread_map *thread_map__new_dummy(void)
{
	struct thread_map *threads = thread_map__alloc(1);

	if (threads != NULL) {
		thread_map__set_pid(threads, 0, -1);
		threads->nr = 1;
		atomic_set(&threads->refcnt, 1);
	}
	return threads;
}

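/*
 * Parse a comma-separated list of tids, one map entry per tid; a NULL
 * string yields a single dummy entry (pid -1).
 */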
static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
{
	struct thread_map *threads = NULL, *nt;
	int ntasks = 0;
	pid_t tid, prev_tid = INT_MAX;
	char *end_ptr;
	struct str_node *pos;
	struct strlist_config slist_config = { .dont_dupstr = true, };
	struct strlist *slist;

	/* perf-stat expects threads to be generated even if tid not given */
	if (!tid_str)
		return thread_map__new_dummy();

	slist = strlist__new(tid_str, &slist_config);
	if (!slist)
		return NULL;

	strlist__for_each(pos, slist) {
		tid = strtol(pos->s, &end_ptr, 10);

		if (tid == INT_MIN || tid == INT_MAX ||
		    (*end_ptr != '\0' && *end_ptr != ','))
			goto out_free_threads;

		if (tid == prev_tid)
			continue;

		ntasks++;
		nt = thread_map__realloc(threads, ntasks);

		if (nt == NULL)
			goto out_free_threads;

		threads = nt;
		thread_map__set_pid(threads, ntasks - 1, tid);
		threads->nr = ntasks;
	}
out:
	if (threads)
		atomic_set(&threads->refcnt, 1);
	return threads;

out_free_threads:
	zfree(&threads);
	strlist__delete(slist);
	goto out;
}

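/*
 * String-based counterpart of thread_map__new(): pid takes precedence,
 * then uid, then tid.
 */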
struct thread_map *thread_map__new_str(const char *pid, const char *tid,
				       uid_t uid)
{
	if (pid)
		return thread_map__new_by_pid_str(pid);

	if (!tid && uid != UINT_MAX)
		return thread_map__new_by_uid(uid);

	return thread_map__new_by_tid_str(tid);
}

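/*
 * Called only from thread_map__put() once the refcount drops to zero;
 * frees the per-thread comm strings along with the map itself.
 */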
static void thread_map__delete(struct thread_map *threads)
{
	if (threads) {
		int i;

		WARN_ONCE(atomic_read(&threads->refcnt) != 0,
			  "thread map refcnt unbalanced\n");
		for (i = 0; i < threads->nr; i++)
			free(thread_map__comm(threads, i));
		free(threads);
	}
}

struct thread_map *thread_map__get(struct thread_map *map)
{
	if (map)
		atomic_inc(&map->refcnt);
	return map;
}

void thread_map__put(struct thread_map *map)
{
	if (map && atomic_dec_and_test(&map->refcnt))
		thread_map__delete(map);
}

size_t thread_map__fprintf(struct thread_map *threads, FILE *fp)
{
	int i;
	size_t printed = fprintf(fp, "%d thread%s: ",
				 threads->nr, threads->nr > 1 ? "s" : "");
	for (i = 0; i < threads->nr; ++i)
		printed += fprintf(fp, "%s%d", i ? ", " : "", thread_map__pid(threads, i));

	return printed + fprintf(fp, "\n");
}

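/*
 * Read the thread's comm string from <procfs>/<pid>/comm into a buffer
 * allocated by filename__read_str().
 */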
static int get_comm(char **comm, pid_t pid)
{
	char *path;
	size_t size;
	int err;

	if (asprintf(&path, "%s/%d/comm", procfs__mountpoint(), pid) == -1)
		return -ENOMEM;

	err = filename__read_str(path, comm, &size);
	if (!err) {
		/*
		 * We're reading 16 bytes, while filename__read_str
		 * allocates data per BUFSIZ bytes, so we can safely
		 * mark the end of the string.
		 */
		(*comm)[size] = 0;
		rtrim(*comm);
	}

	free(path);
	return err;
}

static void comm_init(struct thread_map *map, int i)
{
	pid_t pid = thread_map__pid(map, i);
	char *comm = NULL;

	/* dummy pid comm initialization */
	if (pid == -1) {
		map->map[i].comm = strdup("dummy");
		return;
	}

	/*
	 * The comm name is like extra bonus ;-),
	 * so just warn if we fail for any reason.
	 */
	if (get_comm(&comm, pid))
		pr_warning("Couldn't resolve comm name for pid %d\n", pid);

	map->map[i].comm = comm;
}

void thread_map__read_comms(struct thread_map *threads)
{
	int i;

	for (i = 0; i < threads->nr; ++i)
		comm_init(threads, i);
}

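/* Fill a map from a thread_map event: one pid and comm string per entry. */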
static void thread_map__copy_event(struct thread_map *threads,
				   struct thread_map_event *event)
{
	unsigned i;

	threads->nr = (int) event->nr;

	for (i = 0; i < event->nr; i++) {
		thread_map__set_pid(threads, i, (pid_t) event->entries[i].pid);
		threads->map[i].comm = strndup(event->entries[i].comm, 16);
	}

	atomic_set(&threads->refcnt, 1);
}

struct thread_map *thread_map__new_event(struct thread_map_event *event)
{
	struct thread_map *threads;

	threads = thread_map__alloc(event->nr);
	if (threads)
		thread_map__copy_event(threads, event);

	return threads;
}