WIP FPC-III support
[linux/fpc-iii.git] / kernel / locking / qspinlock_stat.h
blobe625bb410aa2d695d756c193c83207c5025cdd62
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
4 * Authors: Waiman Long <longman@redhat.com>
5 */
7 #include "lock_events.h"
9 #ifdef CONFIG_LOCK_EVENT_COUNTS
10 #ifdef CONFIG_PARAVIRT_SPINLOCKS
/*
 * Collect pvqspinlock locking event counts
 */
14 #include <linux/sched.h>
15 #include <linux/sched/clock.h>
16 #include <linux/fs.h>
18 #define EVENT_COUNT(ev) lockevents[LOCKEVENT_ ## ev]
/*
 * PV specific per-cpu counter
 */
23 static DEFINE_PER_CPU(u64, pv_kick_time);
26 * Function to read and return the PV qspinlock counts.
28 * The following counters are handled specially:
29 * 1. pv_latency_kick
30 * Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
31 * 2. pv_latency_wake
32 * Average wake latency (ns) = pv_latency_wake/pv_kick_wake
33 * 3. pv_hash_hops
34 * Average hops/hash = pv_hash_hops/pv_kick_unlock
36 ssize_t lockevent_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos)
39 char buf[64];
40 int cpu, id, len;
41 u64 sum = 0, kicks = 0;
44 * Get the counter ID stored in file->f_inode->i_private
46 id = (long)file_inode(file)->i_private;
48 if (id >= lockevent_num)
49 return -EBADF;
51 for_each_possible_cpu(cpu) {
52 sum += per_cpu(lockevents[id], cpu);
54 * Need to sum additional counters for some of them
56 switch (id) {
58 case LOCKEVENT_pv_latency_kick:
59 case LOCKEVENT_pv_hash_hops:
60 kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
61 break;
63 case LOCKEVENT_pv_latency_wake:
64 kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
65 break;
69 if (id == LOCKEVENT_pv_hash_hops) {
70 u64 frac = 0;
72 if (kicks) {
73 frac = 100ULL * do_div(sum, kicks);
74 frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
78 * Return a X.XX decimal number
80 len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n",
81 sum, frac);
82 } else {
84 * Round to the nearest ns
86 if ((id == LOCKEVENT_pv_latency_kick) ||
87 (id == LOCKEVENT_pv_latency_wake)) {
88 if (kicks)
89 sum = DIV_ROUND_CLOSEST_ULL(sum, kicks);
91 len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
94 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
98 * PV hash hop count
100 static inline void lockevent_pv_hop(int hopcnt)
102 this_cpu_add(EVENT_COUNT(pv_hash_hops), hopcnt);
106 * Replacement function for pv_kick()
108 static inline void __pv_kick(int cpu)
110 u64 start = sched_clock();
112 per_cpu(pv_kick_time, cpu) = start;
113 pv_kick(cpu);
114 this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start);
118 * Replacement function for pv_wait()
120 static inline void __pv_wait(u8 *ptr, u8 val)
122 u64 *pkick_time = this_cpu_ptr(&pv_kick_time);
124 *pkick_time = 0;
125 pv_wait(ptr, val);
126 if (*pkick_time) {
127 this_cpu_add(EVENT_COUNT(pv_latency_wake),
128 sched_clock() - *pkick_time);
129 lockevent_inc(pv_kick_wake);
133 #define pv_kick(c) __pv_kick(c)
134 #define pv_wait(p, v) __pv_wait(p, v)
136 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
138 #else /* CONFIG_LOCK_EVENT_COUNTS */
/* Stub: lock event counting is disabled, so hash-hop accounting is a no-op. */
static inline void lockevent_pv_hop(int hopcnt)
{
	(void)hopcnt;
}
142 #endif /* CONFIG_LOCK_EVENT_COUNTS */