// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 VMware Inc, Steven Rostedt <rostedt@goodmis.org>
 */
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/slab.h>
#include "trace.h"

/* See pid_list.h for details */

static inline union lower_chunk *get_lower_chunk(struct trace_pid_list *pid_list)
{
	union lower_chunk *chunk;

	lockdep_assert_held(&pid_list->lock);

	if (!pid_list->lower_list)
		return NULL;

	chunk = pid_list->lower_list;
	pid_list->lower_list = chunk->next;
	pid_list->free_lower_chunks--;
	WARN_ON_ONCE(pid_list->free_lower_chunks < 0);
	chunk->next = NULL;
	/*
	 * If a refill needs to happen, it can not happen here
	 * as the scheduler run queue locks are held.
	 */
	if (pid_list->free_lower_chunks <= CHUNK_REALLOC)
		irq_work_queue(&pid_list->refill_irqwork);

	return chunk;
}

static inline union upper_chunk *get_upper_chunk(struct trace_pid_list *pid_list)
{
	union upper_chunk *chunk;

	lockdep_assert_held(&pid_list->lock);

	if (!pid_list->upper_list)
		return NULL;

	chunk = pid_list->upper_list;
	pid_list->upper_list = chunk->next;
	pid_list->free_upper_chunks--;
	WARN_ON_ONCE(pid_list->free_upper_chunks < 0);
	chunk->next = NULL;
	/*
	 * If a refill needs to happen, it can not happen here
	 * as the scheduler run queue locks are held.
	 */
	if (pid_list->free_upper_chunks <= CHUNK_REALLOC)
		irq_work_queue(&pid_list->refill_irqwork);

	return chunk;
}

static inline void put_lower_chunk(struct trace_pid_list *pid_list,
				   union lower_chunk *chunk)
{
	lockdep_assert_held(&pid_list->lock);

	chunk->next = pid_list->lower_list;
	pid_list->lower_list = chunk;
	pid_list->free_lower_chunks++;
}

static inline void put_upper_chunk(struct trace_pid_list *pid_list,
				   union upper_chunk *chunk)
{
	lockdep_assert_held(&pid_list->lock);

	chunk->next = pid_list->upper_list;
	pid_list->upper_list = chunk;
	pid_list->free_upper_chunks++;
}

static inline bool upper_empty(union upper_chunk *chunk)
{
	/*
	 * If chunk->data has no lower chunks, it will be the same
	 * as a zeroed bitmask. Use find_first_bit() to test it
	 * and if it doesn't find any bits set, then the array
	 * is empty.
	 */
	int bit = find_first_bit((unsigned long *)chunk->data,
				 sizeof(chunk->data) * 8);
	return bit >= sizeof(chunk->data) * 8;
}

static inline int pid_split(unsigned int pid, unsigned int *upper1,
			    unsigned int *upper2, unsigned int *lower)
{
	/* MAX_PID should cover all pids */
	BUILD_BUG_ON(MAX_PID < PID_MAX_LIMIT);

	/* In case a bad pid is passed in, then fail */
	if (unlikely(pid >= MAX_PID))
		return -1;

	*upper1 = (pid >> UPPER1_SHIFT) & UPPER_MASK;
	*upper2 = (pid >> UPPER2_SHIFT) & UPPER_MASK;
	*lower = pid & LOWER_MASK;

	return 0;
}

static inline unsigned int pid_join(unsigned int upper1,
				    unsigned int upper2, unsigned int lower)
{
	return ((upper1 & UPPER_MASK) << UPPER1_SHIFT) |
		((upper2 & UPPER_MASK) << UPPER2_SHIFT) |
		(lower & LOWER_MASK);
}

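/*
 * Example (a sketch, not part of the original file): pid_split() and
 * pid_join() are inverses for any pid below MAX_PID, so a pid taken
 * apart by pid_split() can be rebuilt with pid_join():
 *
 *	unsigned int upper1, upper2, lower;
 *
 *	if (!pid_split(pid, &upper1, &upper2, &lower))
 *		WARN_ON(pid_join(upper1, upper2, lower) != pid);
 *
 * The width of each field comes from the UPPER*_SHIFT and *_MASK
 * definitions in pid_list.h.
 */
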
/**
 * trace_pid_list_is_set - test if the pid is set in the list
 * @pid_list: The pid list to test
 * @pid: The pid to see if set in the list.
 *
 * Tests if @pid is set in the @pid_list. This is usually called
 * from the scheduler when a task is scheduled. Its pid is checked
 * to see if it should be traced or not.
 *
 * Return true if the pid is in the list, false otherwise.
 */
bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid)
{
	union upper_chunk *upper_chunk;
	union lower_chunk *lower_chunk;
	unsigned long flags;
	unsigned int upper1;
	unsigned int upper2;
	unsigned int lower;
	bool ret = false;

	if (!pid_list)
		return false;

	if (pid_split(pid, &upper1, &upper2, &lower) < 0)
		return false;

	raw_spin_lock_irqsave(&pid_list->lock, flags);
	upper_chunk = pid_list->upper[upper1];
	if (upper_chunk) {
		lower_chunk = upper_chunk->data[upper2];
		if (lower_chunk)
			ret = test_bit(lower, lower_chunk->data);
	}
	raw_spin_unlock_irqrestore(&pid_list->lock, flags);

	return ret;
}

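/*
 * Example usage (a sketch, not part of this file; the real callers are the
 * sched_switch and fork/exit tracing hooks in the tracing core). Here
 * do_trace_task() stands in for whatever the caller does with a match:
 *
 *	if (trace_pid_list_is_set(pid_list, task->pid))
 *		do_trace_task(task);
 */
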
/**
 * trace_pid_list_set - add a pid to the list
 * @pid_list: The pid list to add the @pid to.
 * @pid: The pid to add.
 *
 * Adds @pid to @pid_list. This is usually done explicitly by a user
 * adding a task to be traced, or indirectly by the fork function
 * when children should be traced and a task's pid is in the list.
 *
 * Return 0 on success, negative otherwise.
 */
int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid)
{
	union upper_chunk *upper_chunk;
	union lower_chunk *lower_chunk;
	unsigned long flags;
	unsigned int upper1;
	unsigned int upper2;
	unsigned int lower;
	int ret;

	if (!pid_list)
		return -ENODEV;

	if (pid_split(pid, &upper1, &upper2, &lower) < 0)
		return -EINVAL;

	raw_spin_lock_irqsave(&pid_list->lock, flags);
	upper_chunk = pid_list->upper[upper1];
	if (!upper_chunk) {
		upper_chunk = get_upper_chunk(pid_list);
		if (!upper_chunk) {
			ret = -ENOMEM;
			goto out;
		}
		pid_list->upper[upper1] = upper_chunk;
	}
	lower_chunk = upper_chunk->data[upper2];
	if (!lower_chunk) {
		lower_chunk = get_lower_chunk(pid_list);
		if (!lower_chunk) {
			ret = -ENOMEM;
			goto out;
		}
		upper_chunk->data[upper2] = lower_chunk;
	}
	set_bit(lower, lower_chunk->data);
	ret = 0;
 out:
	raw_spin_unlock_irqrestore(&pid_list->lock, flags);
	return ret;
}

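/*
 * Example usage (a sketch, not part of this file): a fork hook that lets
 * children inherit the parent's tracing state might do:
 *
 *	if (trace_pid_list_is_set(pid_list, parent->pid))
 *		trace_pid_list_set(pid_list, child->pid);
 *
 * The return value should be checked in real code, as the call fails with
 * -ENOMEM when no free chunks are available.
 */
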
/**
 * trace_pid_list_clear - remove a pid from the list
 * @pid_list: The pid list to remove the @pid from.
 * @pid: The pid to remove.
 *
 * Removes @pid from @pid_list. This is usually done explicitly by a user
 * removing tasks from tracing, or indirectly by the exit function
 * when a task that is set to be traced exits.
 *
 * Return 0 on success, negative otherwise.
 */
int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid)
{
	union upper_chunk *upper_chunk;
	union lower_chunk *lower_chunk;
	unsigned long flags;
	unsigned int upper1;
	unsigned int upper2;
	unsigned int lower;

	if (!pid_list)
		return -ENODEV;

	if (pid_split(pid, &upper1, &upper2, &lower) < 0)
		return -EINVAL;

	raw_spin_lock_irqsave(&pid_list->lock, flags);
	upper_chunk = pid_list->upper[upper1];
	if (!upper_chunk)
		goto out;

	lower_chunk = upper_chunk->data[upper2];
	if (!lower_chunk)
		goto out;

	clear_bit(lower, lower_chunk->data);

	/* if there are no more bits set, add it to the free list */
	if (find_first_bit(lower_chunk->data, LOWER_MAX) >= LOWER_MAX) {
		put_lower_chunk(pid_list, lower_chunk);
		upper_chunk->data[upper2] = NULL;
		if (upper_empty(upper_chunk)) {
			put_upper_chunk(pid_list, upper_chunk);
			pid_list->upper[upper1] = NULL;
		}
	}
 out:
	raw_spin_unlock_irqrestore(&pid_list->lock, flags);
	return 0;
}

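/*
 * Example usage (a sketch, not part of this file): an exit hook that stops
 * following a task once it is gone would simply drop its pid:
 *
 *	trace_pid_list_clear(pid_list, task->pid);
 *
 * Once the last bit in a lower chunk is cleared, the chunk (and, if it
 * becomes empty, its parent upper chunk) is returned to the free lists.
 */
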
/**
 * trace_pid_list_next - return the next pid in the list
 * @pid_list: The pid list to examine.
 * @pid: The pid to start from
 * @next: The pointer to place the pid that is set starting from @pid.
 *
 * Looks for the next consecutive pid that is in @pid_list starting
 * at the pid specified by @pid. If one is set (including @pid), then
 * that pid is placed into @next.
 *
 * Return 0 when a pid is found, -1 if there are no more pids included.
 */
int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid,
			unsigned int *next)
{
	union upper_chunk *upper_chunk;
	union lower_chunk *lower_chunk;
	unsigned long flags;
	unsigned int upper1;
	unsigned int upper2;
	unsigned int lower;

	if (!pid_list)
		return -ENODEV;

	if (pid_split(pid, &upper1, &upper2, &lower) < 0)
		return -EINVAL;

	raw_spin_lock_irqsave(&pid_list->lock, flags);
	for (; upper1 <= UPPER_MASK; upper1++, upper2 = 0) {
		upper_chunk = pid_list->upper[upper1];

		if (!upper_chunk)
			continue;

		for (; upper2 <= UPPER_MASK; upper2++, lower = 0) {
			lower_chunk = upper_chunk->data[upper2];
			if (!lower_chunk)
				continue;

			lower = find_next_bit(lower_chunk->data, LOWER_MAX,
					      lower);
			if (lower < LOWER_MAX)
				goto found;
		}
	}

 found:
	raw_spin_unlock_irqrestore(&pid_list->lock, flags);
	if (upper1 > UPPER_MASK)
		return -1;

	*next = pid_join(upper1, upper2, lower);
	return 0;
}

/**
 * trace_pid_list_first - return the first pid in the list
 * @pid_list: The pid list to examine.
 * @pid: The pointer to place the first found pid that is set.
 *
 * Looks for the first pid that is set in @pid_list, and places it
 * into @pid if found.
 *
 * Return 0 when a pid is found, -1 if there are no pids set.
 */
int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid)
{
	return trace_pid_list_next(pid_list, 0, pid);
}

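/*
 * Example usage (a sketch, not part of this file): walking every pid that
 * is currently set, in ascending order:
 *
 *	unsigned int pid;
 *	int ret;
 *
 *	for (ret = trace_pid_list_first(pid_list, &pid); !ret;
 *	     ret = trace_pid_list_next(pid_list, pid + 1, &pid))
 *		pr_info("traced pid: %u\n", pid);
 */
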
static void pid_list_refill_irq(struct irq_work *iwork)
{
	struct trace_pid_list *pid_list = container_of(iwork, struct trace_pid_list,
						       refill_irqwork);
	union upper_chunk *upper = NULL;
	union lower_chunk *lower = NULL;
	union upper_chunk **upper_next = &upper;
	union lower_chunk **lower_next = &lower;
	int upper_count;
	int lower_count;
	int ucnt = 0;
	int lcnt = 0;

 again:
	raw_spin_lock(&pid_list->lock);
	upper_count = CHUNK_ALLOC - pid_list->free_upper_chunks;
	lower_count = CHUNK_ALLOC - pid_list->free_lower_chunks;
	raw_spin_unlock(&pid_list->lock);

	if (upper_count <= 0 && lower_count <= 0)
		return;

	while (upper_count-- > 0) {
		union upper_chunk *chunk;

		chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
		if (!chunk)
			break;
		*upper_next = chunk;
		upper_next = &chunk->next;
		ucnt++;
	}

	while (lower_count-- > 0) {
		union lower_chunk *chunk;

		chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
		if (!chunk)
			break;
		*lower_next = chunk;
		lower_next = &chunk->next;
		lcnt++;
	}

	raw_spin_lock(&pid_list->lock);
	if (upper) {
		*upper_next = pid_list->upper_list;
		pid_list->upper_list = upper;
		pid_list->free_upper_chunks += ucnt;
	}
	if (lower) {
		*lower_next = pid_list->lower_list;
		pid_list->lower_list = lower;
		pid_list->free_lower_chunks += lcnt;
	}
	raw_spin_unlock(&pid_list->lock);

	/*
	 * On success of allocating all the chunks, both counters
	 * will be less than zero. If they are not, then an allocation
	 * failed, and we should not try again.
	 */
	if (upper_count >= 0 || lower_count >= 0)
		return;
	/*
	 * When the locks were released, free chunks could have
	 * been used and allocation needs to be done again. Might as
	 * well allocate it now.
	 */
	goto again;
}

/**
 * trace_pid_list_alloc - create a new pid_list
 *
 * Allocates a new pid_list to store pids into.
 *
 * Returns the pid_list on success, NULL otherwise.
 */
struct trace_pid_list *trace_pid_list_alloc(void)
{
	struct trace_pid_list *pid_list;
	int i;

	/* According to linux/thread.h, pids can be no bigger than 30 bits */
	WARN_ON_ONCE(pid_max > (1 << 30));

	pid_list = kzalloc(sizeof(*pid_list), GFP_KERNEL);
	if (!pid_list)
		return NULL;

	init_irq_work(&pid_list->refill_irqwork, pid_list_refill_irq);

	raw_spin_lock_init(&pid_list->lock);

	for (i = 0; i < CHUNK_ALLOC; i++) {
		union upper_chunk *chunk;

		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
		if (!chunk)
			break;
		chunk->next = pid_list->upper_list;
		pid_list->upper_list = chunk;
		pid_list->free_upper_chunks++;
	}

	for (i = 0; i < CHUNK_ALLOC; i++) {
		union lower_chunk *chunk;

		chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
		if (!chunk)
			break;
		chunk->next = pid_list->lower_list;
		pid_list->lower_list = chunk;
		pid_list->free_lower_chunks++;
	}

	return pid_list;
}

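/*
 * Example usage (a sketch, not part of this file): typical lifecycle of a
 * pid list. trace_pid_list_alloc() uses GFP_KERNEL, so it must be called
 * from a context that may sleep:
 *
 *	struct trace_pid_list *pid_list;
 *
 *	pid_list = trace_pid_list_alloc();
 *	if (!pid_list)
 *		return -ENOMEM;
 *
 *	trace_pid_list_set(pid_list, pid);
 *	...
 *	trace_pid_list_free(pid_list);
 */
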
/**
 * trace_pid_list_free - Frees an allocated pid_list.
 * @pid_list: The pid list to free.
 *
 * Frees the memory for a pid_list that was allocated.
 */
void trace_pid_list_free(struct trace_pid_list *pid_list)
{
	union upper_chunk *upper;
	union lower_chunk *lower;
	int i, j;

	if (!pid_list)
		return;

	irq_work_sync(&pid_list->refill_irqwork);

	while (pid_list->lower_list) {
		union lower_chunk *chunk;

		chunk = pid_list->lower_list;
		pid_list->lower_list = pid_list->lower_list->next;
		kfree(chunk);
	}

	while (pid_list->upper_list) {
		union upper_chunk *chunk;

		chunk = pid_list->upper_list;
		pid_list->upper_list = pid_list->upper_list->next;
		kfree(chunk);
	}

	for (i = 0; i < UPPER1_SIZE; i++) {
		upper = pid_list->upper[i];
		if (upper) {
			for (j = 0; j < UPPER2_SIZE; j++) {
				lower = upper->data[j];