// SPDX-License-Identifier: GPL-2.0
#include "threads.h"
#include "thread.h"

static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
{
	/* Cast it to handle tid == -1 */
	return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
}
static size_t key_hash(long key, void *ctx __maybe_unused)
{
	/* The table lookup removes low bit entropy, but this is just ignored here. */
	return key;
}
static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return key1 == key2;
}
void threads__init(struct threads *threads)
{
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];

		hashmap__init(&table->shard, key_hash, key_equal, NULL);
		init_rwsem(&table->lock);
		table->last_match = NULL;
	}
}
void threads__exit(struct threads *threads)
{
	threads__remove_all_threads(threads);
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];

		hashmap__clear(&table->shard);
		exit_rwsem(&table->lock);
	}
}
size_t threads__nr(struct threads *threads)
{
	size_t nr = 0;

	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];

		down_read(&table->lock);
		nr += hashmap__size(&table->shard);
		up_read(&table->lock);
	}
	return nr;
}
/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full table.
 */
static struct thread *__threads_table_entry__get_last_match(struct threads_table_entry *table,
							     pid_t tid)
{
	struct thread *th, *res = NULL;

	th = table->last_match;
	if (th != NULL) {
		if (thread__tid(th) == tid)
			res = thread__get(th);
	}
	return res;
}
static void __threads_table_entry__set_last_match(struct threads_table_entry *table,
						   struct thread *th)
{
	thread__put(table->last_match);
	table->last_match = thread__get(th);
}
static void threads_table_entry__set_last_match(struct threads_table_entry *table,
						 struct thread *th)
{
	down_write(&table->lock);
	__threads_table_entry__set_last_match(table, th);
	up_write(&table->lock);
}
struct thread *threads__find(struct threads *threads, pid_t tid)
{
	struct threads_table_entry *table = threads__table(threads, tid);
	struct thread *res;

	down_read(&table->lock);
	res = __threads_table_entry__get_last_match(table, tid);
	if (!res) {
		if (hashmap__find(&table->shard, tid, &res))
			res = thread__get(res);
	}
	up_read(&table->lock);
	if (res)
		threads_table_entry__set_last_match(table, res);
	return res;
}
struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
{
	struct threads_table_entry *table = threads__table(threads, tid);
	struct thread *res = NULL;

	*created = false;
	down_write(&table->lock);
	res = thread__new(pid, tid);
	if (res) {
		if (hashmap__add(&table->shard, tid, res)) {
			/* Add failed. Assume a race so find other entry. */
			thread__put(res);
			res = NULL;
			if (hashmap__find(&table->shard, tid, &res))
				res = thread__get(res);
		} else {
			res = thread__get(res);
			*created = true;
		}
		if (res)
			__threads_table_entry__set_last_match(table, res);
	}
	up_write(&table->lock);
	return res;
}
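
/*
 * Usage sketch (hypothetical caller, not part of this file): look a thread up
 * by tid, creating it on a miss, and drop the reference when done. "threads",
 * "pid" and "tid" are assumed to come from the caller.
 *
 *	bool created;
 *	struct thread *th = threads__findnew(threads, pid, tid, &created);
 *
 *	if (th != NULL) {
 *		...use th...
 *		thread__put(th);
 *	}
 */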
void threads__remove_all_threads(struct threads *threads)
{
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];
		struct hashmap_entry *cur, *tmp;
		size_t bkt;

		down_write(&table->lock);
		__threads_table_entry__set_last_match(table, NULL);
		hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) {
			struct thread *old_value;

			hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value);
			thread__put(old_value);
		}
		up_write(&table->lock);
	}
}
void threads__remove(struct threads *threads, struct thread *thread)
{
	struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
	struct thread *old_value;

	down_write(&table->lock);
	if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
		__threads_table_entry__set_last_match(table, NULL);

	hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value);
	thread__put(old_value);
	up_write(&table->lock);
}
int threads__for_each_thread(struct threads *threads,
			     int (*fn)(struct thread *thread, void *data),
			     void *data)
{
	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads_table_entry *table = &threads->table[i];
		struct hashmap_entry *cur;
		size_t bkt;

		down_read(&table->lock);
		hashmap__for_each_entry((&table->shard), cur, bkt) {
			int rc = fn((struct thread *)cur->pvalue, data);

			if (rc != 0) {
				up_read(&table->lock);
				return rc;
			}
		}
		up_read(&table->lock);
	}
	return 0;
}
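
/*
 * Iteration sketch (hypothetical callback, not part of this file): count the
 * threads through the callback interface; a non-zero return stops the walk.
 *
 *	static int count_cb(struct thread *thread __maybe_unused, void *data)
 *	{
 *		(*(size_t *)data)++;
 *		return 0;
 *	}
 *
 *	size_t n = 0;
 *	threads__for_each_thread(threads, count_cb, &n);
 */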