kernel/locking/lock_events.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <longman@redhat.com>
 */

#ifndef __LOCKING_LOCK_EVENTS_H
#define __LOCKING_LOCK_EVENTS_H

enum lock_events {

#include "lock_events_list.h"

	lockevent_num,	/* Total number of lock event counts */
	LOCKEVENT_reset_cnts = lockevent_num,
};
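
/*
 * Illustrative note (not part of the original header): lock_events_list.h
 * is assumed to name each event through a LOCK_EVENT(name) macro that, in
 * this context, expands to an enum entry, roughly:
 *
 *	#define LOCK_EVENT(name)	LOCKEVENT_ ## name,
 *
 * so LOCK_EVENT(lock_pending) becomes LOCKEVENT_lock_pending, and
 * including the list inside the enum body turns every listed event into
 * a consecutive enum value, with lockevent_num ending up as the total
 * count. The event name lock_pending is only a hypothetical example here.
 */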

#ifdef CONFIG_LOCK_EVENT_COUNTS
/*
 * Per-cpu counters
 */
DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

/*
 * Increment the statistical counters. Use raw_cpu_inc() for its lower
 * overhead; we don't care if we lose the occasional update.
 */
static inline void __lockevent_inc(enum lock_events event, bool cond)
{
	if (cond)
		raw_cpu_inc(lockevents[event]);
}

#define lockevent_inc(ev)	  __lockevent_inc(LOCKEVENT_ ##ev, true)
#define lockevent_cond_inc(ev, c) __lockevent_inc(LOCKEVENT_ ##ev, c)

static inline void __lockevent_add(enum lock_events event, int inc)
{
	raw_cpu_add(lockevents[event], inc);
}

#define lockevent_add(ev, c)	__lockevent_add(LOCKEVENT_ ##ev, c)
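
/*
 * Usage sketch (illustrative only, not part of the original header). A
 * lock slowpath can bump a counter unconditionally, conditionally, or by
 * an arbitrary amount:
 *
 *	lockevent_inc(lock_slowpath);
 *	lockevent_cond_inc(lock_pending, !wait);
 *	lockevent_add(lock_use_node2, 2);
 *
 * The event names above are hypothetical placeholders; real names come
 * from lock_events_list.h. Token pasting turns lockevent_inc(foo) into
 * __lockevent_inc(LOCKEVENT_foo, true).
 */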

#else  /* CONFIG_LOCK_EVENT_COUNTS */
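
/*
 * Added explanatory note (not in the original): with lock event counting
 * configured out, the helpers below compile away to nothing, so callers
 * can use them unconditionally, without #ifdef guards and with zero
 * runtime cost.
 */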
#define lockevent_inc(ev)
#define lockevent_add(ev, c)
#define lockevent_cond_inc(ev, c)

#endif /* CONFIG_LOCK_EVENT_COUNTS */
#endif /* __LOCKING_LOCK_EVENTS_H */