tools/perf/util/thread.c

#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "session.h"
#include "thread.h"
#include "util.h"
#include "debug.h"
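
/*
 * Allocate and initialize a struct thread for @pid: zero it, set up its
 * map groups, and give it a placeholder comm of ":<pid>" until a real
 * command name is recorded via thread__set_comm().
 */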
static struct thread *thread__new(pid_t pid)
{
	struct thread *self = zalloc(sizeof(*self));

	if (self != NULL) {
		map_groups__init(&self->mg);
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
	}

	return self;
}
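
/* Tear down a thread: release its map groups, its comm string and the struct itself. */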
void thread__delete(struct thread *self)
{
	map_groups__exit(&self->mg);
	free(self->comm);
	free(self);
}
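
/*
 * Replace the thread's command name with a copy of @comm. On success, mark
 * the comm as explicitly set and flush the existing map groups.
 */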
int thread__set_comm(struct thread *self, const char *comm)
{
	int err;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	err = self->comm == NULL ? -ENOMEM : 0;
	if (!err) {
		self->comm_set = true;
		map_groups__flush(&self->mg);
	}

	return err;
}
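
/* Lazily compute and cache the length of the thread's comm; 0 if it has none. */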
int thread__comm_len(struct thread *self)
{
	if (!self->comm_len) {
		if (!self->comm)
			return 0;
		self->comm_len = strlen(self->comm);
	}

	return self->comm_len;
}
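
/* Print a one-line header for the thread followed by its map groups to @fp. */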
static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", self->pid, self->comm) +
	       map_groups__fprintf(&self->mg, verbose, fp);
}
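
/*
 * Look up the thread with @pid in the machine's rbtree, creating and
 * inserting a new one if it is not there yet. The most recently returned
 * thread is cached in last_match to short-circuit repeated lookups.
 */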
struct thread *machine__findnew_thread(struct machine *self, pid_t pid)
{
	struct rb_node **p = &self->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (self->last_match && self->last_match->pid == pid)
		return self->last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			self->last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &self->threads);
		self->last_match = th;
	}

	return th;
}
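
/* Add @map to the thread's map groups, fixing up any overlap with existing maps first. */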
void thread__insert_map(struct thread *self, struct map *map)
{
	map_groups__fixup_overlappings(&self->mg, map, verbose, stderr);
	map_groups__insert(&self->mg, map);
}
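
/*
 * Inherit state from @parent at fork time: copy the parent's comm if it was
 * explicitly set, then clone its map groups for every map type. Returns
 * -ENOMEM on allocation failure.
 */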
int thread__fork(struct thread *self, struct thread *parent)
{
	int i;

	if (parent->comm_set) {
		if (self->comm)
			free(self->comm);
		self->comm = strdup(parent->comm);
		if (!self->comm)
			return -ENOMEM;
		self->comm_set = true;
	}

	for (i = 0; i < MAP__NR_TYPES; ++i)
		if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
			return -ENOMEM;

	return 0;
}
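
/* Dump every thread in the machine's rbtree to @fp, returning the total characters written. */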
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}
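
/*
 * Usage sketch (not part of this file; the variables machine, pid and
 * comm_string are illustrative): a tool processing a PERF_RECORD_COMM
 * event would look the thread up by pid, creating it on first sight, and
 * then record its command name:
 *
 *	struct thread *t = machine__findnew_thread(machine, pid);
 *
 *	if (t == NULL || thread__set_comm(t, comm_string) != 0)
 *		return -1;
 */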