OMAP3 SRF: Update OMAP-PM layer
[linux-ginger.git] / tools / perf / util / thread.c
blob45efb5db0d19819f281f75b17541dd9ebb76b8ad
1 #include "../perf.h"
2 #include <stdlib.h>
3 #include <stdio.h>
4 #include <string.h>
5 #include "thread.h"
6 #include "util.h"
7 #include "debug.h"
9 static struct thread *thread__new(pid_t pid)
11 struct thread *self = calloc(1, sizeof(*self));
13 if (self != NULL) {
14 self->pid = pid;
15 self->comm = malloc(32);
16 if (self->comm)
17 snprintf(self->comm, 32, ":%d", self->pid);
18 INIT_LIST_HEAD(&self->maps);
21 return self;
24 int thread__set_comm(struct thread *self, const char *comm)
26 if (self->comm)
27 free(self->comm);
28 self->comm = strdup(comm);
29 return self->comm ? 0 : -ENOMEM;
32 static size_t thread__fprintf(struct thread *self, FILE *fp)
34 struct map *pos;
35 size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);
37 list_for_each_entry(pos, &self->maps, node)
38 ret += map__fprintf(pos, fp);
40 return ret;
43 struct thread *
44 threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
46 struct rb_node **p = &threads->rb_node;
47 struct rb_node *parent = NULL;
48 struct thread *th;
51 * Font-end cache - PID lookups come in blocks,
52 * so most of the time we dont have to look up
53 * the full rbtree:
55 if (*last_match && (*last_match)->pid == pid)
56 return *last_match;
58 while (*p != NULL) {
59 parent = *p;
60 th = rb_entry(parent, struct thread, rb_node);
62 if (th->pid == pid) {
63 *last_match = th;
64 return th;
67 if (pid < th->pid)
68 p = &(*p)->rb_left;
69 else
70 p = &(*p)->rb_right;
73 th = thread__new(pid);
74 if (th != NULL) {
75 rb_link_node(&th->rb_node, parent, p);
76 rb_insert_color(&th->rb_node, threads);
77 *last_match = th;
80 return th;
/*
 * Ensure pid 0 (the idle task) is present in the tree and give it its
 * conventional "swapper" name.  Exits the whole program on failure,
 * since the tool cannot proceed without the idle thread registered.
 */
struct thread *
register_idle_thread(struct rb_root *threads, struct thread **last_match)
{
	struct thread *thread = threads__findnew(0, threads, last_match);

	if (thread == NULL || thread__set_comm(thread, "swapper") != 0) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}

	return thread;
}
96 void thread__insert_map(struct thread *self, struct map *map)
98 struct map *pos, *tmp;
100 list_for_each_entry_safe(pos, tmp, &self->maps, node) {
101 if (map__overlap(pos, map)) {
102 if (verbose >= 2) {
103 printf("overlapping maps:\n");
104 map__fprintf(map, stdout);
105 map__fprintf(pos, stdout);
108 if (map->start <= pos->start && map->end > pos->start)
109 pos->start = map->end;
111 if (map->end >= pos->end && map->start < pos->end)
112 pos->end = map->start;
114 if (verbose >= 2) {
115 printf("after collision:\n");
116 map__fprintf(pos, stdout);
119 if (pos->start >= pos->end) {
120 list_del_init(&pos->node);
121 free(pos);
126 list_add_tail(&map->node, &self->maps);
129 int thread__fork(struct thread *self, struct thread *parent)
131 struct map *map;
133 if (self->comm)
134 free(self->comm);
135 self->comm = strdup(parent->comm);
136 if (!self->comm)
137 return -ENOMEM;
139 list_for_each_entry(map, &parent->maps, node) {
140 struct map *new = map__clone(map);
141 if (!new)
142 return -ENOMEM;
143 thread__insert_map(self, new);
146 return 0;
149 struct map *thread__find_map(struct thread *self, u64 ip)
151 struct map *pos;
153 if (self == NULL)
154 return NULL;
156 list_for_each_entry(pos, &self->maps, node)
157 if (ip >= pos->start && ip <= pos->end)
158 return pos;
160 return NULL;
163 size_t threads__fprintf(FILE *fp, struct rb_root *threads)
165 size_t ret = 0;
166 struct rb_node *nd;
168 for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
169 struct thread *pos = rb_entry(nd, struct thread, rb_node);
171 ret += thread__fprintf(pos, fp);
174 return ret;