/*
 *  drivers/cpufreq/cpufreq_stats.c
 *
 *  Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *  (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <asm/cputime.h>
static spinlock_t cpufreq_stats_lock;
struct cpufreq_stats {
	unsigned int cpu;
	unsigned int total_trans;
	unsigned long long last_time;
	unsigned int max_state;
	unsigned int state_num;
	unsigned int last_index;
	u64 *time_in_state;
	unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;
#endif
};
static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t (*show) (struct cpufreq_stats *, char *);
};
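/*
 * Fold the jiffies elapsed since ->last_time into the time_in_state bucket
 * for the frequency the CPU has been running at (->last_index), under
 * cpufreq_stats_lock.
 */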
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] +=
			cur_time - stat->last_time;
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
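/* sysfs "total_trans": total number of frequency transitions seen on this CPU. */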
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	return sprintf(buf, "%d\n",
			per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
}
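/* sysfs "time_in_state": one "<frequency> <time>" line per state, time in clock_t units. */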
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	for (i = 0; i < stat->state_num; i++) {
		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
			(unsigned long long)
			cputime64_to_clock_t(stat->time_in_state[i]));
	}
	return len;
}
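/*
 * sysfs "trans_table" (CONFIG_CPU_FREQ_STAT_DETAILS only): a from/to matrix
 * of transition counts, truncated once the output would exceed PAGE_SIZE.
 */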
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif
cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);
static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};
static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
	int index;
	for (index = 0; index < stat->max_state; index++)
		if (stat->freq_table[index] == freq)
			return index;
	return -1;
}
/* should be called late in the CPU removal sequence so that the stats
 * memory is still available in case someone tries to use it.
 */
static void cpufreq_stats_free_table(unsigned int cpu)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);

	if (stat) {
		pr_debug("%s: Free stat table\n", __func__);
		kfree(stat->time_in_state);
		kfree(stat);
		per_cpu(cpufreq_stats_table, cpu) = NULL;
	}
}
/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!cpufreq_frequency_get_table(cpu))
		return;

	if (policy && !policy_is_shared(policy)) {
		pr_debug("%s: Free sysfs stat\n", __func__);
		sysfs_remove_group(&policy->kobj, &stats_attr_group);
	}
	if (policy)
		cpufreq_cpu_put(policy);
}
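/*
 * Allocate and register the stats for one policy.  A single kzalloc holds,
 * in order: the time_in_state array (one u64 per state), the freq_table
 * (one unsigned int per state) and, with CONFIG_CPU_FREQ_STAT_DETAILS,
 * the count * count transition matrix.
 */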
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table)
{
	unsigned int i, j, count = 0, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *data;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;
	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
	if (!stat)
		return -ENOMEM;

	data = cpufreq_cpu_get(cpu);
	if (!data) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&data->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;
	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(data);
	return 0;
error_out:
	cpufreq_cpu_put(data);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}
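/*
 * When the CPU that manages a policy changes, move the per-CPU stats pointer
 * from the old owner (policy->last_cpu) to the new one (policy->cpu).
 */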
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
			policy->last_cpu);

	pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
			policy->cpu, policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
			policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
	stat->cpu = policy->cpu;
}
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int ret;
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table;
	unsigned int cpu = policy->cpu;

	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
		cpufreq_stats_update_policy_cpu(policy);
		return 0;
	}

	if (val != CPUFREQ_NOTIFY)
		return 0;
	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		return 0;
	ret = cpufreq_stats_create_table(policy, table);
	if (ret)
		return ret;
	return 0;
}
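/*
 * Transition notifier: on CPUFREQ_POSTCHANGE, charge the elapsed time to the
 * old frequency, record the new state index and bump the transition counters.
 */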
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1] = .. */
	if (old_index == -1 || new_index == -1)
		return 0;

	cpufreq_stats_update(freq->cpu);

	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
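/*
 * CPU hotplug callback: refresh the policy (and thus create stats) when a CPU
 * comes online, drop the sysfs group before a CPU is removed, and free the
 * stats table once the CPU is dead.
 */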
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
					       unsigned long action,
					       void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cpufreq_update_policy(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cpufreq_stats_free_sysfs(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cpufreq_stats_free_table(cpu);
		break;
	}
	return NOTIFY_OK;
}
/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
	.notifier_call = cpufreq_stat_cpu_callback,
	.priority = 1,
};
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);

	ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	return 0;
}
static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_stats_free_table(cpu);
		cpufreq_stats_free_sysfs(cpu);
	}
}
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
			"through sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);