/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into the code
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/plugin.h"
#include "qemu/queue.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"

#include "hw/core/cpu.h"

#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "plugin.h"
struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;
    union qemu_plugin_cb_sig f;
    void *udata;
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

struct qemu_plugin_state plugin;
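
/* Resolve a plugin id to its context; the caller must hold plugin.lock. */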
struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}
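
/*
 * Run on a vCPU: copy the global event mask into this vCPU's local mask and
 * flush its jump cache so stale instrumented translations are not reused.
 */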
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_state->event_mask,
                &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    tcg_flush_jmp_cache(cpu);
}
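
/*
 * g_hash_table_foreach helper: schedule the event-mask update above on one
 * vCPU. Called with plugin.lock held.
 */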
static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
}
void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
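
/*
 * Register, update or unregister a callback for the given event: a NULL func
 * unregisters, an existing record is updated in place, otherwise a new record
 * is inserted and the event bit is propagated to every vCPU.
 */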
static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            /* update */
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            /* new */
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}
void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}
void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}
CPUPluginState *qemu_plugin_create_vcpu_state(void)
{
    return g_new0(CPUPluginState, 1);
}
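
/*
 * Scoreboards have plugin.scoreboard_alloc_size slots. When a vCPU index
 * outgrows that, the size is doubled; resizing existing scoreboards happens
 * inside an exclusive section, as translated code may still hold pointers
 * into the old arrays.
 */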
static void plugin_grow_scoreboards__locked(CPUState *cpu)
{
    size_t scoreboard_size = plugin.scoreboard_alloc_size;
    bool need_realloc = false;

    if (cpu->cpu_index < scoreboard_size) {
        return;
    }

    while (cpu->cpu_index >= scoreboard_size) {
        scoreboard_size *= 2;
        need_realloc = true;
    }

    if (!need_realloc) {
        return;
    }

    if (QLIST_EMPTY(&plugin.scoreboards)) {
        /* just update size for future scoreboards */
        plugin.scoreboard_alloc_size = scoreboard_size;
        return;
    }

    /*
     * A scoreboard creation/deletion might be in progress. If a new vcpu is
     * initialized at the same time, we are safe, as the new
     * plugin.scoreboard_alloc_size was not yet written.
     */
    qemu_rec_mutex_unlock(&plugin.lock);

    /* cpus must be stopped, as tb might still use an existing scoreboard. */
    start_exclusive();
    /* re-acquire lock */
    qemu_rec_mutex_lock(&plugin.lock);
    /* in case another vcpu is created between unlock and exclusive section. */
    if (scoreboard_size > plugin.scoreboard_alloc_size) {
        struct qemu_plugin_scoreboard *score;
        QLIST_FOREACH(score, &plugin.scoreboards, entry) {
            g_array_set_size(score->data, scoreboard_size);
        }
        plugin.scoreboard_alloc_size = scoreboard_size;
        /* force all tb to be flushed, as scoreboard pointers were changed. */
        tb_flush(cpu);
    }
    end_exclusive();
}
static void qemu_plugin_vcpu_init__async(CPUState *cpu, run_on_cpu_data unused)
{
    bool success;

    assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    qemu_rec_mutex_lock(&plugin.lock);
    plugin.num_vcpus = MAX(plugin.num_vcpus, cpu->cpu_index + 1);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    plugin_grow_scoreboards__locked(cpu);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    /* Plugin initialization must wait until the cpu starts executing code */
    async_run_on_cpu(cpu, qemu_plugin_vcpu_init__async, RUN_ON_CPU_NULL);
}
void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}
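
/* Arguments threaded through g_hash_table_foreach by qemu_plugin_vcpu_for_each. */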
struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};
static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}
void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}
/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, true,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}
static enum plugin_dyn_cb_type op_to_cb_type(enum qemu_plugin_op op)
{
    switch (op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        return PLUGIN_CB_INLINE_ADD_U64;
    case QEMU_PLUGIN_INLINE_STORE_U64:
        return PLUGIN_CB_INLINE_STORE_U64;
    default:
        g_assert_not_reached();
    }
}
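
/*
 * Record an inline operation on a scoreboard entry. For illustration only
 * (not part of this file), a plugin would typically reach this via the
 * public API from qemu-plugin.h, e.g.:
 *
 *     qemu_plugin_u64 insns = qemu_plugin_scoreboard_u64(score);
 *     qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
 *         tb, QEMU_PLUGIN_INLINE_ADD_U64, insns, n_insns);
 *
 * which lands here with op == QEMU_PLUGIN_INLINE_ADD_U64.
 */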
void plugin_register_inline_op_on_entry(GArray **arr,
                                        enum qemu_plugin_mem_rw rw,
                                        enum qemu_plugin_op op,
                                        qemu_plugin_u64 entry,
                                        uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    struct qemu_plugin_inline_cb inline_cb = { .rw = rw,
                                               .entry = entry,
                                               .imm = imm };
    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->type = op_to_cb_type(op);
    dyn_cb->inline_insn = inline_cb;
}
void plugin_register_dyn_cb__udata(GArray **arr,
                                   qemu_plugin_vcpu_udata_cb_t cb,
                                   enum qemu_plugin_cb_flags flags,
                                   void *udata)
{
    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_udata_cb_t:
         *   void (*)(uint32_t, void *)
         */
        [0 ... 2].typemask = (dh_typemask(void, 0) |
                              dh_typemask(i32, 1) |
                              dh_typemask(ptr, 2))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_regular_cb regular_cb = { .f.vcpu_udata = cb,
                                                 .userp = udata,
                                                 .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->regular = regular_cb;
}
void plugin_register_dyn_cond_cb__udata(GArray **arr,
                                        qemu_plugin_vcpu_udata_cb_t cb,
                                        enum qemu_plugin_cb_flags flags,
                                        enum qemu_plugin_cond cond,
                                        qemu_plugin_u64 entry,
                                        uint64_t imm,
                                        void *udata)
{
    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_udata_cb_t:
         *   void (*)(uint32_t, void *)
         */
        [0 ... 2].typemask = (dh_typemask(void, 0) |
                              dh_typemask(i32, 1) |
                              dh_typemask(ptr, 2))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_conditional_cb cond_cb = { .userp = udata,
                                                  .f.vcpu_udata = cb,
                                                  .cond = cond,
                                                  .entry = entry,
                                                  .imm = imm,
                                                  .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_COND;
    dyn_cb->cond = cond_cb;
}
void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    /*
     * Expect that the underlying type for enum qemu_plugin_meminfo_t
     * is either int32_t or uint32_t, aka int or unsigned int.
     */
    QEMU_BUILD_BUG_ON(
        !__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t) &&
        !__builtin_types_compatible_p(qemu_plugin_meminfo_t, int32_t));

    static TCGHelperInfo info[3] = {
        [QEMU_PLUGIN_CB_NO_REGS].flags = TCG_CALL_NO_RWG,
        [QEMU_PLUGIN_CB_R_REGS].flags = TCG_CALL_NO_WG,
        /*
         * Match qemu_plugin_vcpu_mem_cb_t:
         *   void (*)(uint32_t, qemu_plugin_meminfo_t, uint64_t, void *)
         */
        [0 ... 2].typemask =
            (dh_typemask(void, 0) |
             dh_typemask(i32, 1) |
             (__builtin_types_compatible_p(qemu_plugin_meminfo_t, uint32_t)
              ? dh_typemask(i32, 2) : dh_typemask(s32, 2)) |
             dh_typemask(i64, 3) |
             dh_typemask(ptr, 4))
    };
    assert((unsigned)flags < ARRAY_SIZE(info));

    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);
    struct qemu_plugin_regular_cb regular_cb = { .userp = udata,
                                                 .rw = rw,
                                                 .f.vcpu_mem = cb,
                                                 .info = &info[flags] };
    dyn_cb->type = PLUGIN_CB_MEM_REGULAR;
    dyn_cb->regular = regular_cb;
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_state->event_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}
/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_state->event_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}
void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    /* idle and resume cb may be called before init, ignore in this case */
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
    }
}
void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    if (cpu->cpu_index < plugin.num_vcpus) {
        plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
    }
}
void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}
void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}
void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}
static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}
void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}
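
/*
 * Interpreted version of an inline op, used from the memory-callback path
 * below; the u64 slot for cpu_index N lives at
 * score->data->data + entry.offset + N * element_size.
 */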
void exec_inline_op(enum plugin_dyn_cb_type type,
                    struct qemu_plugin_inline_cb *cb,
                    int cpu_index)
{
    char *ptr = cb->entry.score->data->data;
    size_t elem_size = g_array_get_element_size(
        cb->entry.score->data);
    size_t offset = cb->entry.offset;
    uint64_t *val = (uint64_t *)(ptr + offset + cpu_index * elem_size);

    switch (type) {
    case PLUGIN_CB_INLINE_ADD_U64:
        *val += cb->imm;
        break;
    case PLUGIN_CB_INLINE_STORE_U64:
        *val = cb->imm;
        break;
    default:
        g_assert_not_reached();
    }
}
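
/*
 * Called for each instrumented memory access: dispatch to every registered
 * memory callback whose rw mask matches the access, either invoking a
 * regular callback or applying an inline op to this vCPU's scoreboard slot.
 */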
void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
                             uint64_t value_low,
                             uint64_t value_high,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
    GArray *arr = cpu->neg.plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }

    cpu->neg.plugin_mem_value_low = value_low;
    cpu->neg.plugin_mem_value_high = value_high;

    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);

        switch (cb->type) {
        case PLUGIN_CB_MEM_REGULAR:
            if (rw & cb->regular.rw) {
                cb->regular.f.vcpu_mem(cpu->cpu_index,
                                       make_plugin_meminfo(oi, rw),
                                       vaddr, cb->regular.userp);
            }
            break;
        case PLUGIN_CB_INLINE_ADD_U64:
        case PLUGIN_CB_INLINE_STORE_U64:
            if (rw & cb->inline_insn.rw) {
                exec_inline_op(cb->type, &cb->inline_insn, cpu->cpu_index);
            }
            break;
        default:
            g_assert_not_reached();
        }
    }
}
void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}
void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}
/*
 * Handle exit from linux-user. Unlike the normal atexit() mechanism
 * we need to handle the clean-up manually as it's possible threads
 * are still running. We need to remove all callbacks from code
 * generation, flush the current translations and then we can safely
 * trigger the exit callbacks.
 */
void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    /*
     * Locking order: we must acquire locks in an order that is consistent
     * with the one in fork_start(). That is:
     * - start_exclusive(), which acquires qemu_cpu_list_lock,
     *   must be called before acquiring plugin.lock.
     * - tb_flush(), which acquires mmap_lock(), must be called
     *   while plugin.lock is not held.
     */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* un-register all callbacks except the final AT_EXIT one */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    tb_flush(current_cpu);
    end_exclusive();

    /* now it's safe to handle the exit case */
    qemu_plugin_atexit_cb();
}
/*
 * Helpers for *-user to ensure locks are sane across fork() events.
 */

void qemu_plugin_user_prefork_lock(void)
{
    qemu_rec_mutex_lock(&plugin.lock);
}

void qemu_plugin_user_postfork(bool is_child)
{
    if (is_child) {
        /* should we just reset via plugin_init? */
        qemu_rec_mutex_init(&plugin.lock);
    } else {
        qemu_rec_mutex_unlock(&plugin.lock);
    }
}
static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}
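
/*
 * Constructor: runs at load time, before main(), so the state below exists
 * before any plugin can be installed.
 */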
static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QLIST_INIT(&plugin.scoreboards);
    plugin.scoreboard_alloc_size = 16; /* avoid frequent reallocation */
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}
int plugin_num_vcpus(void)
{
    return plugin.num_vcpus;
}
struct qemu_plugin_scoreboard *plugin_scoreboard_new(size_t element_size)
{
    struct qemu_plugin_scoreboard *score =
        g_malloc0(sizeof(struct qemu_plugin_scoreboard));
    score->data = g_array_new(FALSE, TRUE, element_size);
    g_array_set_size(score->data, plugin.scoreboard_alloc_size);

    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_INSERT_HEAD(&plugin.scoreboards, score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);

    return score;
}
void plugin_scoreboard_free(struct qemu_plugin_scoreboard *score)
{
    qemu_rec_mutex_lock(&plugin.lock);
    QLIST_REMOVE(score, entry);
    qemu_rec_mutex_unlock(&plugin.lock);

    g_array_free(score->data, TRUE);
    g_free(score);
}