static struct thread *thread__new(pid_t pid)
{
	struct thread *self = calloc(1, sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		INIT_LIST_HEAD(&self->removed_maps);
	}

	return self;
}
int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	return self->comm ? 0 : -ENOMEM;
}
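/*
 * Note: thread__new() above only fills in a ":%d" placeholder comm; the
 * expectation (an assumption about callers, not something this file
 * enforces) is that the real name arrives later and is installed via
 * thread__set_comm(), which frees the placeholder first.
 */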
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	struct map *pos;
	struct rb_node *nd;
	size_t ret = fprintf(fp, "Thread %d %s\nCurrent maps:\n",
			     self->pid, self->comm);

	for (nd = rb_first(&self->maps); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct map, rb_node);
		ret += map__fprintf(pos, fp);
	}

	ret += fprintf(fp, "Removed maps:\n");

	list_for_each_entry(pos, &self->removed_maps, node)
		ret += map__fprintf(pos, fp);

	return ret;
}
struct thread *
threads__findnew(pid_t pid, struct rb_root *threads, struct thread **last_match)
{
	struct rb_node **p = &threads->rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (*last_match && (*last_match)->pid == pid)
		return *last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			*last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, threads);
		*last_match = th;
	}

	return th;
}
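/*
 * Hypothetical caller sketch (illustrative only; "threads", "last_match",
 * "pid" and "name" are assumed identifiers, not part of this file):
 * callers keep one rb_root plus the last_match cache and pass both to
 * every lookup so the cache can be updated.
 *
 *	struct rb_root threads = RB_ROOT;
 *	struct thread *last_match = NULL;
 *
 *	struct thread *th = threads__findnew(pid, &threads, &last_match);
 *	if (th == NULL || thread__set_comm(th, name))
 *		return -1;
 */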
struct thread *
register_idle_thread(struct rb_root *threads, struct thread **last_match)
{
	struct thread *thread = threads__findnew(0, threads, last_match);

	if (!thread || thread__set_comm(thread, "swapper")) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}

	return thread;
}
static void thread__remove_overlappings(struct thread *self, struct map *map)
{
	struct rb_node *next = rb_first(&self->maps);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			printf("overlapping maps:\n");
			map__fprintf(map, stdout);
			map__fprintf(pos, stdout);
		}

		rb_erase(&pos->rb_node, &self->maps);
		/*
		 * We may have references to this map, for instance in some
		 * hist_entry instances, so just move them to a separate
		 * list.
		 */
		list_add_tail(&pos->node, &self->removed_maps);
	}
}
void maps__insert(struct rb_root *maps, struct map *map)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, maps);
}
struct map *maps__find(struct rb_root *maps, u64 ip)
{
	struct rb_node **p = &maps->rb_node;
	struct rb_node *parent = NULL;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip > m->end)
			p = &(*p)->rb_right;
		else
			return m;
	}

	return NULL;
}
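/*
 * Hypothetical lookup sketch (illustrative; "th" and "ip" are assumed
 * names): finding which of a thread's mappings contains an address.
 *
 *	struct map *map = maps__find(&th->maps, ip);
 *	if (map != NULL)
 *		map__fprintf(map, stdout);	(ip is within [map->start, map->end])
 */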
void thread__insert_map(struct thread *self, struct map *map)
{
	thread__remove_overlappings(self, map);
	maps__insert(&self->maps, map);
}
int thread__fork(struct thread *self, struct thread *parent)
{
	struct rb_node *nd;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(parent->comm);
	if (!self->comm)
		return -ENOMEM;

	for (nd = rb_first(&parent->maps); nd; nd = rb_next(nd)) {
		struct map *map = rb_entry(nd, struct map, rb_node);
		struct map *new = map__clone(map);

		if (!new)
			return -ENOMEM;
		thread__insert_map(self, new);
	}

	return 0;
}
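/*
 * Hypothetical fork-event sketch (illustrative; "threads", "last_match",
 * "ppid" and "pid" are assumed names): the child inherits the parent's
 * comm and a clone of each of its maps.
 *
 *	struct thread *parent = threads__findnew(ppid, &threads, &last_match);
 *	struct thread *child  = threads__findnew(pid,  &threads, &last_match);
 *
 *	if (parent == NULL || child == NULL || thread__fork(child, parent))
 *		return -1;
 */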
size_t threads__fprintf(FILE *fp, struct rb_root *threads)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
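/*
 * Dumping every known thread and its maps is then a single call
 * (sketch; "threads" as in the earlier sketches):
 *
 *	threads__fprintf(stdout, &threads);
 */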