/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#ifndef LOCK_EVENT
#define LOCK_EVENT(name)	LOCKEVENT_ ## name,
#endif
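
/*
 * Usage sketch (illustrative, not part of the upstream file): consumers
 * typically redefine LOCK_EVENT and then re-include this list, so that the
 * same event table expands into different constructs. The default
 * definition above yields the LOCKEVENT_* enum identifiers; a consumer
 * could instead build a name table, e.g.
 *
 *	#undef  LOCK_EVENT
 *	#define LOCK_EVENT(name)	#name,
 *	static const char * const lockevent_names[] = {
 *	#include "lock_events_list.h"
 *	};
 */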

#ifdef CONFIG_QUEUED_SPINLOCKS
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Locking events for PV qspinlock.
 */
LOCK_EVENT(pv_hash_hops)	/* Average # of hops per hashing operation */
LOCK_EVENT(pv_kick_unlock)	/* # of vCPU kicks issued at unlock time */
LOCK_EVENT(pv_kick_wake)	/* # of vCPU kicks for pv_latency_wake */
LOCK_EVENT(pv_latency_kick)	/* Average latency (ns) of vCPU kick */
LOCK_EVENT(pv_latency_wake)	/* Average latency (ns) of kick-to-wakeup */
LOCK_EVENT(pv_lock_stealing)	/* # of lock stealing operations */
LOCK_EVENT(pv_spurious_wakeup)	/* # of spurious wakeups in non-head vCPUs */
LOCK_EVENT(pv_wait_again)	/* # of wait's after queue head vCPU kick */
LOCK_EVENT(pv_wait_early)	/* # of early vCPU wait's */
LOCK_EVENT(pv_wait_head)	/* # of vCPU wait's at the queue head */
LOCK_EVENT(pv_wait_node)	/* # of vCPU wait's at non-head queue node */
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

/*
 * Locking events for qspinlock
 *
 * Subtracting lock_use_node[234] from lock_slowpath will give you
 * lock_use_node1.
 */
LOCK_EVENT(lock_pending)	/* # of locking ops via pending code */
LOCK_EVENT(lock_slowpath)	/* # of locking ops via MCS lock queue */
LOCK_EVENT(lock_use_node2)	/* # of locking ops that use 2nd percpu node */
LOCK_EVENT(lock_use_node3)	/* # of locking ops that use 3rd percpu node */
LOCK_EVENT(lock_use_node4)	/* # of locking ops that use 4th percpu node */
LOCK_EVENT(lock_no_node)	/* # of locking ops w/o using percpu node */
#endif /* CONFIG_QUEUED_SPINLOCKS */
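
/*
 * Example (illustrative, derived from the comment above): the number of
 * slowpath acquisitions that used only the first percpu MCS node is not
 * counted directly, but can be computed from the counters above as
 *
 *	lock_use_node1 = lock_slowpath
 *		       - lock_use_node2 - lock_use_node3 - lock_use_node4
 */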

/*
 * Locking events for rwsem
 */
LOCK_EVENT(rwsem_sleep_reader)	/* # of reader sleeps */
LOCK_EVENT(rwsem_sleep_writer)	/* # of writer sleeps */
LOCK_EVENT(rwsem_wake_reader)	/* # of reader wakeups */
LOCK_EVENT(rwsem_wake_writer)	/* # of writer wakeups */
LOCK_EVENT(rwsem_opt_rlock)	/* # of opt-acquired read locks */
LOCK_EVENT(rwsem_opt_wlock)	/* # of opt-acquired write locks */
LOCK_EVENT(rwsem_opt_fail)	/* # of failed optspins */
LOCK_EVENT(rwsem_opt_nospin)	/* # of disabled optspins */
LOCK_EVENT(rwsem_opt_norspin)	/* # of disabled reader-only optspins */
LOCK_EVENT(rwsem_opt_rlock2)	/* # of opt-acquired 2ndary read locks */
LOCK_EVENT(rwsem_rlock)		/* # of read locks acquired */
LOCK_EVENT(rwsem_rlock_fast)	/* # of fast read locks acquired */
LOCK_EVENT(rwsem_rlock_fail)	/* # of failed read lock acquisitions */
LOCK_EVENT(rwsem_rlock_handoff)	/* # of read lock handoffs */
LOCK_EVENT(rwsem_wlock)		/* # of write locks acquired */
LOCK_EVENT(rwsem_wlock_fail)	/* # of failed write lock acquisitions */
LOCK_EVENT(rwsem_wlock_handoff)	/* # of write lock handoffs */